diff --git "a/3427.jsonl" "b/3427.jsonl" new file mode 100644--- /dev/null +++ "b/3427.jsonl" @@ -0,0 +1,655 @@ +{"seq_id":"1156289","text":"import pycountry\nimport xml.dom.minidom\nfrom upload_file import get_blob\n\n\ndef parse_xml_response_by_path(fs):\n needed_keys = {\n 'LastName',\n 'GivenName',\n 'Nationality',\n 'BirthDate',\n 'ExpiryDate',\n 'DocumentNumber',\n }\n dom = xml.dom.minidom.parseString(fs)\n parsed_resp = dict()\n for elem in dom.firstChild.getElementsByTagName('field'):\n attr = elem.getAttribute('type')\n if attr in needed_keys:\n value = elem.getElementsByTagName('value')[0].firstChild.data\n if attr in ('BirthDate', 'ExpiryDate'):\n value_year = value[0:2]\n value_month = value[2:4]\n value_date = value[4:6]\n if attr == 'ExpiryDate':\n value_year = '20' + value_year\n if attr == 'BirthDate':\n if int(value_year) > 20:\n value_year = '19' + value_year\n else:\n value_year = '20' + value_year\n value = '{}.{}.{}'.format(value_date, value_month, value_year)\n if attr == 'Nationality':\n value = \"{} / {}\".format(pycountry.countries.get(alpha_3=value).alpha_2,\n pycountry.countries.get(alpha_3=value).name)\n parsed_resp[attr] = value\n return parsed_resp\n","sub_path":"src/parse_xml_response.py","file_name":"parse_xml_response.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"529803278","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom collections import OrderedDict\nimport re\nimport unicodedata\n\nfrom food.settings import NO_DATA, STOPWORDS\n\nclass SearchParser:\n \"\"\"\n Parser class\n To manage user question parsing\n \"\"\"\n def __init__(self):\n \"\"\" Constructor \"\"\"\n self.cleaned_string = ''\n self.cleaned_string_words_list = []\n\n def refuse_empty_string(self, raw_string):\n \"\"\"\n We check that the user input value is not empty or made up of spaces\n :param: raw_string is a string\n :return: a string \"no_data\" indicating that the response is empty\n or consists of spaces\n :rtype: string\n \"\"\"\n if not raw_string or raw_string.strip() == \"\":\n return NO_DATA\n else:\n return raw_string\n\n def remove_accented_characters(self, raw_string):\n \"\"\"\n String cleaning to facilitate comparisons, step 1\n :param: raw_string is a string\n :return: a string without accented characters and in lower case\n :rtype: string\n Example : \"Pâte à tartiner\" --> \"pate a tartiner\"\n \"\"\"\n trimmed_string = raw_string.strip()\n # We replace the accented characters and use lower case\n no_accent_data = ''.join(\n (c for c in unicodedata.normalize('NFD', trimmed_string)\n if unicodedata.category(c) != 'Mn')\n )\n\n return no_accent_data.lower()\n\n def remove_special_characters(self, raw_string):\n \"\"\"\n String cleaning to facilitate comparisons, step 2\n :param: raw_string is a string\n :return: a string without special characters\n :rtype: string\n Example : \"l'horloge de# no$tre-dame tourne ?\" --> \"l'horloge de notre-dame tourne\"\n \"\"\"\n # We delete the letters with quote (l', d', etc.)\n no_quote_letter_data = re.sub(r\"(\\s[a-z])'\", \" \", raw_string)\n # We delete the special characters except the hyphen\n return re.sub(r'[^\\-\\'\\,\\w\\s]','',no_quote_letter_data)\n\n def get_cleaned_data_list(self, raw_string):\n \"\"\"\n Cutting the string into words\n :param: raw_string is a string\n :return: a list containing all the words of the given string\n :rtype: list\n \"\"\"\n # We transform the string into a list of words\n 
cleaned_data_temp_list = []\n        cleaned_data_list = []\n        cleaned_data_temp_list = raw_string.split(' ')\n        # We delete the duplicates\n        cleaned_data_list = list(OrderedDict.fromkeys(cleaned_data_temp_list))\n        # We delete the empty items\n        value_to_delete = ''\n        cleaned_data_list = [i for i in cleaned_data_list if i != value_to_delete]\n\n        return cleaned_data_list\n\n    def get_clean_stopwords_list(self):\n        \"\"\"\n        Remove accents in predefined stopwords list\n        :return: a clean stopwords list without accents\n        :rtype: list\n        \"\"\"\n        clean_stopwords_list = []\n        for word in STOPWORDS:\n            clean_word = self.remove_accented_characters(word)\n            clean_stopwords_list.append(clean_word.strip())\n\n        # We delete the duplicates\n        clean_stopwords_list = list(set(clean_stopwords_list))\n\n        return clean_stopwords_list\n\n    def remove_stopwords(self, raw_list, clean_stopwords_list):\n        \"\"\"\n        Removal of unnecessary words from a pre-defined list\n        :param: raw_list is a list of cleaned words (strings)\n        :return: a list containing only the important words\n        :rtype: list\n        \"\"\"\n        # We determine the list of words to delete in the original list\n        words_to_remove = list(set(raw_list) & set(clean_stopwords_list))\n\n        # We remove unnecessary words from the original list\n        for word in words_to_remove:\n            raw_list.remove(word)\n\n        self.cleaned_string_words_list = raw_list\n\n        return self.cleaned_string_words_list\n\n    def get_cleaned_string(self, raw_string):\n        \"\"\"\n        Reconstruction of a string from the list of important words\n        :param: raw_string is a raw string to parse\n        :return: a parsed/cleaned string without accents, special\n        characters or unnecessary words\n        :rtype: string\n        \"\"\"\n        if self.refuse_empty_string(raw_string) == NO_DATA:\n            print(\"La question de l'utilisateur est vide ou constituée d'espaces.\")\n            return NO_DATA\n\n        # We remove any accents and special characters (punctuation and others)\n        no_accent_data = self.remove_accented_characters(raw_string)\n        no_special_characters_data = self.remove_special_characters(no_accent_data)\n        # We get the original cleaned word list\n        cleaned_data_list = self.get_cleaned_data_list(no_special_characters_data)\n        # We retrieve the cleaned stopwords list\n        clean_stopwords_list = self.get_clean_stopwords_list()\n\n        return list(tuple(self.remove_stopwords(cleaned_data_list, clean_stopwords_list)))\n","sub_path":"food/search_parser.py","file_name":"search_parser.py","file_ext":"py","file_size_in_byte":5177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"85801902","text":"import pandas as pd\nfrom recovery.models import GoldCertificate, PMR\nfrom treatment.models import DayOfWork\nfrom django.db import IntegrityError\nfrom PyPDF2 import PdfFileReader\n\n\ndef read_pmr(fp):\n    text = ''\n    with open(fp, 'rb') as f:\n        pdf = PdfFileReader(f)\n        page = pdf.getPage(0)\n        text = page.extractText()\n    t = text.split(\" \")\n\n    pmr = {\n        \"deposit_id\": get_deposit_id(t),\n        \"cust_ref\": get_cust_ref(t),\n        \"refining_fee\": get_refining_fee(t).replace(\",\", \"\"),\n        \"copper_fee\": get_copper_fee(t).replace(\",\", \"\"),\n        \"iron_fee\": get_iron_fee(t).replace(\",\", \"\"),\n        \"nickel_fee\": get_nickel_fee(t).replace(\",\", \"\"),\n        \"silver_swap\": get_silver_swap(t).replace(\",\", \"\"),\n        \"gold_swap\": get_gold_swap(t).replace(\",\", \"\"),\n        \"official_weight\": get_gold_official_weight(t).replace(\",\", \"\"),\n        \"exchange_rate\": get_exchange_rate(t).replace(\",\", \"\"),\n        \"dispatch_weight\": 
get_dispatch_weight(t).replace(\",\", \"\"),\n \"deposit_date\": get_deposit_date(t)\n }\n return pmr\n\ndef save_pmr(d):\n\n pmr = PMR()\n pmr.deposit_id = d[\"deposit_id\"]\n pmr.cust_ref = d[\"cust_ref\"]\n pmr.refining_fee = d[\"refining_fee\"]\n pmr.copper_fee = d[\"copper_fee\"]\n pmr.nickel_fee = d[\"nickel_fee\"]\n pmr.iron_fee = d[\"iron_fee\"]\n pmr.silver_swap_fee = d[\"silver_swap\"]\n pmr.gold_swap_fee = d[\"gold_swap\"]\n pmr.official_weight = d[\"official_weight\"]\n pmr.exchange_rate = d[\"exchange_rate\"]\n pmr.dispatch_weight = d[\"dispatch_weight\"]\n pmr.deposit_date = d[\"deposit_date\"]\n try:\n pmr.save()\n except IntegrityError:\n pass\n\ndef get_deposit_date(text_str_lst):\n date_lst = containing_str(text_str_lst, \"Date\")\n date = date_lst[0]\n date = date.split(\"Date\")[1]\n date = date.split(\"Customer\")[0]\n return date.strip()\n\ndef get_dispatch_weight(text_str_lst):\n w_list = containing_str(text_str_lst, \"Weight\")\n w = w_list[0]\n w = w.split(\"Weight\")[1]\n w = w.split(\"Dry\")[0]\n return w.strip()\n\ndef get_exchange_rate(text_str_lst):\n rate_lst = containing_str(text_str_lst, \"Exchange\")\n rate = rate_lst[0]\n rate = rate.split(\"Exchange\")[1]\n rate = rate.split(\"Company\")[0]\n return rate.strip()\n \n\ndef get_gold_official_weight(text_str_lst):\n stats_lst = containing_str(text_str_lst, \"GOLD\")\n stats = stats_lst[0]\n stats = stats.split(\"GOLD\")[1]\n weight = stats[:6]\n return weight.strip()\n\ndef get_gold_swap(text_str_lst):\n au_swap_lst = containing_str(text_str_lst, \"Au\")\n au_swap = au_swap_lst[0]\n au_swap = au_swap.split(\"ZAR\")[0]\n au_swap = au_swap[10:]\n return au_swap.strip()\n\ndef get_silver_swap(text_str_lst):\n ag_swap_lst = containing_str(text_str_lst, \"Ag\")\n ag_swap = ag_swap_lst[0]\n ag_swap = ag_swap.split(\"ZAR\")[0]\n ag_swap = ag_swap[10:]\n return ag_swap.strip()\n\ndef get_iron_fee(text_str_lst):\n iron_fee_lst = containing_str(text_str_lst, \"(Fe)\")\n try:\n fee = iron_fee_lst[0]\n except IndexError:\n return str(0.0)\n fee = fee.split(\"ZAR\")[0]\n fee = fee[12:]\n return fee.strip()\n\ndef get_copper_fee(text_str_lst):\n copper_fee_lst = containing_str(text_str_lst, \"(Cu)\")\n try:\n fee = copper_fee_lst[0]\n except IndexError:\n return str(0.0)\n fee = fee.split(\"ZAR\")[0]\n fee = fee[12:]\n return fee.strip()\n\ndef get_nickel_fee(text_str_lst):\n nickel_fee_lst = containing_str(text_str_lst, \"(Ni)\")\n try:\n fee = nickel_fee_lst[0]\n except IndexError:\n return str(0.0)\n fee = fee.split(\"ZAR\")[0]\n fee = fee[12:]\n return fee.strip()\n\ndef get_refining_fee(text_str_lst):\n refining_fee_lst = containing_str(text_str_lst, \"Fee\")\n fee = refining_fee_lst[0]\n if len(fee) == 0:\n refining_fee_lst = containing_str(text_str_lst, \"Charge\")\n fee = refining_fee_lst[1]\n fee = fee.split(\"ZAR\")[0]\n fee = fee[11:]\n return fee.strip()\n\ndef get_deposit_id(text_str_lst):\n batch_lst = containing_str(text_str_lst, \"Batch\")\n _, b = batch_lst[0].split(\"#\")\n b, _ = b.split(\"D\")\n return b.strip()\n\ndef get_cust_ref(text_str_lst):\n ref_lst = containing_str(text_str_lst, \"Reference\")\n _, r = ref_lst[0].split('ce')\n r = r.split('Cust')[0]\n return r.strip()\n\ndef containing_str(text_str_lst, s_str):\n batch_lst = []\n for s in text_str_lst:\n if s_str in s:\n batch_lst.append(s)\n return batch_lst\n\n\n'''\nGold Certificate\n'''\ndef read_audit_certificate(fp):\n\n df = pd.read_excel(fp, sheet_name=\"EXCEPTIONS\")\n _, c = df.shape\n df.columns = range(c)\n df.drop([2, 4], 
inplace=True, axis=1)\n df1 = df[[0, 1]]\n df1.dropna(inplace=True, axis=0)\n df2 = df[[3, 5]]\n df2.dropna(inplace=True, axis=0)\n cols = ['label', 'value']\n df1.columns = cols\n df2.columns = cols\n df1.drop(df1.tail(1).index, inplace=True)\n df2.drop(df2.tail(2).index, inplace=True)\n df = pd.concat([df1, df2], axis=0)\n df.set_index('label', inplace=True)\n return df\n\n\n\ndef save_gold_certificate(df):\n\n for x, y in df.iteritems():\n gc = GoldCertificate()\n try:\n gc.deposit_id = y['Deposit Number']\n gc.operator = y['Operator']\n gc.date = y['Date'].strftime(\"%Y-%m-%d\")\n gc.time = y['Time']\n gc.Au = y['Au']\n gc.Ag = y['Ag']\n gc.Cu = y['Cu']\n gc.Pb = y['Pb']\n gc.Zn = y['Zn']\n gc.Cr = y['Cr']\n gc.Ni = y['Ni']\n gc.Fe = y['Fe']\n gc.Co = y['Co']\n gc.Mn = y['Mn']\n gc.Sn = y['Sn']\n gc.Sb = y['Sb']\n gc.Te = y['Te']\n gc.Pt = y['Pt']\n gc.Pd = y['Pd']\n gc.Ir = y['Ir']\n except KeyError:\n print(\"[ Error ]: %s\" % y['Deposit Number'])\n pass\n try:\n gc.save()\n except IntegrityError:\n print(\"[ Error ]: %s\" % y['Deposit Number'])\n \n \ndef read_and_save_gold_certificate(fp):\n df = read_audit_certificate(fp)\n save_gold_certificate(df)\n\n\ndef read_and_save_pmr(fp):\n d = read_pmr(fp)\n print(d)\n save_pmr(d)","sub_path":"lin_dc/src/datamanager/input/xl/rand_refinery.py","file_name":"rand_refinery.py","file_ext":"py","file_size_in_byte":6193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"97593160","text":"from django.contrib.auth.models import Permission\nfrom django.contrib.auth.models import User\n\ndef create_users(number):\n for i in range(number):\n name = f'ala{i}'\n user = User.objects.create_user(name, f'{name}@example.com', 'kot')\n user.save()\n\ncreate_users(10)\nbetter_user = User.objects.create_user('Ola', 'staff@example.com', 'kot')\nbetter_user.is_staff = True\nbetter_user.save()\n\n","sub_path":"list3/1/create_users.py","file_name":"create_users.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"287836564","text":"\nimport asyncio\nimport enum\nimport uuid\nimport logging\nimport json\n\nfrom aiosmb import logger\nfrom aiosmb.examples.scancommons.targetgens import *\nfrom aiosmb.examples.scancommons.internal import *\nfrom aiosmb.examples.scancommons.utils import *\nfrom aiosmb.commons.utils.univeraljson import UniversalEncoder\n\nfrom aiosmb.commons.connection.url import SMBConnectionURL\nfrom aiosmb.commons.interfaces.machine import SMBMachine\nfrom aiosmb.protocol.common import SMB_NEGOTIATE_PROTOCOL_TEST, NegotiateDialects\n\nfrom tqdm import tqdm\n\nclass SMBProtocolEnumProgressResult:\n\tdef __init__(self, total_targets, total_finished, gens_finished, target):\n\t\tself.total_targets = total_targets\n\t\tself.total_finished = total_finished\n\t\tself.gens_finished = gens_finished\n\t\tself.target = target\n\nclass SMBProtocolEnumResultInner:\n\tdef __init__(self, target_id, target, result, error = None, status = EnumResultStatus.RESULT):\n\t\tself.target_id = target_id\n\t\tself.target = target\n\t\tself.error = error\n\t\tself.result = result\n\t\tself.status = status\n\nclass SMBProtocolEnumData:\n\tdef __init__(self, target_id, target, protocol, sign_en, sign_req, reply):\n\t\tself.target_id = target_id\n\t\tself.target = target\n\t\tself.protocol = protocol\n\t\tself.sign_en = sign_en\n\t\tself.sign_req = sign_req\n\t\tself.reply = reply\n\nSMBPROTOCOLENUM_TSV_HDR = ['target', 'target_id', 
'protocol', 'sigen', 'sigreq' ]\n\n\nclass SMBProtocolEnumResult:\n\tdef __init__(self, obj, otype):\n\t\tself.obj = obj\n\t\tself.otype = otype\n\n\tdef __str__(self):\n\t\tif self.otype == 'result':\n\t\t\treturn '[R] %s | %s | %s | %s | %s' % (self.obj.target, self.obj.target_id, self.obj.protocol, self.obj.sign_en, self.obj.sign_req)\n\t\n\t\telif self.otype == 'error':\n\t\t\treturn '[E] %s | %s | %s' % (self.obj.target, self.obj.target_id, self.obj.error)\n\n\t\telif self.otype == 'progress':\n\t\t\treturn '[P] %s/%s | %s | %s' % (self.obj.total_targets, self.obj.total_finished, str(self.obj.gens_finished), self.obj.target)\n\n\t\telse:\n\t\t\treturn '[UNK]'\n\t\n\tdef to_dict(self):\n\t\tif self.otype == 'result':\n\t\t\tt = {}\n\t\t\tt['target'] = self.obj.target\n\t\t\tt['target_id'] = self.obj.target_id\n\t\t\tt['protocol'] = self.obj.protocol\n\t\t\tt['sigen'] = self.obj.sign_en\n\t\t\tt['sigreq'] = self.obj.sign_req\n\t\t\treturn t\n\t\treturn {}\n\t\n\tdef to_json(self):\n\t\tdd = self.to_dict()\n\t\treturn json.dumps(dd, cls = UniversalEncoder)\n\n\tdef to_tsv(self, hdrs = SMBPROTOCOLENUM_TSV_HDR, separator = '\\t'):\n\t\tif self.otype == 'result':\n\t\t\tdd = self.to_dict()\n\t\t\tdata = [ str(dd[x]) for x in hdrs ]\n\t\t\treturn separator.join(data)\n\n\t\treturn ''\n\nclass SMBProtocolEnum:\n\tdef __init__(self, worker_count = 100, timeout = 5, only_signing = False, protocols = SMB_NEGOTIATE_PROTOCOL_TEST, exclude_target = [], show_pbar = False, ext_result_q = None, output_type = 'str', out_file = None):\n\t\tself.target_gens = []\n\t\tself.timeout = timeout\n\t\tself.worker_count = worker_count\n\t\tself.task_q = None\n\t\tself.res_q = None\n\t\tself.exclude_target = exclude_target\n\t\tself.workers = []\n\t\tself.result_processing_task = None\n\t\tself.show_pbar = show_pbar\n\t\tself.ext_result_q = ext_result_q\n\t\tself.output_type = output_type\n\t\tself.out_file = out_file\n\t\tself.__gens_finished = False\n\t\tself.__total_targets = 0\n\t\tself.__total_finished = 0\n\t\tself.protocols = protocols\n\t\tself.only_signing = only_signing\n\n\tasync def __executor(self, tid, target):\n\t\ttry:\n\t\t\tfor protocol in self.protocols:\n\t\t\t\tsmb_mgr = SMBConnectionURL('smb2+ntlm-password://%s/?timeout=%s' % (target, self.timeout))\n\t\t\t\tconnection = smb_mgr.create_connection_newtarget(target)\n\t\t\t\tres, sign_en, sign_req, rply, err = await connection.protocol_test([protocol])\n\t\t\t\tif err is not None:\n\t\t\t\t\traise err\n\t\t\t\t\n\t\t\t\ter = SMBProtocolEnumResultInner(tid, target, (protocol, res, sign_en, sign_req, rply, err))\n\t\t\t\tawait self.res_q.put(er)\n\t\t\t\tif self.only_signing is True:\n\t\t\t\t\treturn\n\t\texcept asyncio.CancelledError:\n\t\t\treturn\n\t\texcept Exception as e:\n\t\t\tawait self.res_q.put(SMBProtocolEnumResultInner(tid, target, None, error = e, status = EnumResultStatus.ERROR))\n\t\tfinally:\n\t\t\tawait self.res_q.put(SMBProtocolEnumResultInner(tid, target, None, status = EnumResultStatus.FINISHED))\n\n\tasync def worker(self):\n\t\ttry:\n\t\t\twhile True:\n\t\t\t\tindata = await self.task_q.get()\n\t\t\t\tif indata is None:\n\t\t\t\t\treturn\n\t\t\t\t\n\t\t\t\ttid, target = indata\n\t\t\t\ttry:\n\t\t\t\t\tawait asyncio.wait_for(self.__executor(tid, target), timeout=10)\n\t\t\t\texcept asyncio.CancelledError:\n\t\t\t\t\treturn\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tpass\n\t\texcept asyncio.CancelledError:\n\t\t\treturn\n\t\t\t\t\n\t\texcept Exception as e:\n\t\t\treturn e\n\n\tasync def 
result_processing(self):\n\t\ttry:\n\t\t\tpbar = None\n\t\t\tif self.show_pbar is True:\n\t\t\t\tpbar = {}\n\t\t\t\tpbar['targets'] = tqdm(desc='Targets ', unit='', position=0)\n\t\t\t\tpbar['smb1'] = tqdm(desc='SMBv1 ', unit='', position=1)\n\t\t\t\tpbar['signenabled'] = tqdm(desc='Signing enabled ', unit='', position=2)\n\t\t\t\tpbar['signnotenforced'] = tqdm(desc='Signing NOT enforced ', unit='', position=3)\n\t\t\t\tpbar['connerrors'] = tqdm(desc='Conn Errors ', unit='', position=4)\n\t\t\t\n\t\t\tout_buffer = []\n\t\t\tfinal_iter = False\n\n\t\t\twhile True:\n\t\t\t\ttry:\n\t\t\t\t\tif self.__gens_finished is True and self.show_pbar is True and pbar['targets'].total is None:\n\t\t\t\t\t\tpbar['targets'].total = self.__total_targets\n\t\t\t\t\t\tfor key in pbar:\n\t\t\t\t\t\t\tpbar[key].refresh()\n\t\t\t\t\tif self.ext_result_q is not None:\n\t\t\t\t\t\tout_buffer = []\n\n\t\t\t\t\tif len(out_buffer) >= 1000 or final_iter and self.ext_result_q is None:\n\t\t\t\t\t\tout_data = ''\n\t\t\t\t\t\tif self.output_type == 'str':\n\t\t\t\t\t\t\tout_data = '\\r\\n'.join([str(x) for x in out_buffer])\n\t\t\t\t\t\telif self.output_type == 'tsv':\n\t\t\t\t\t\t\tfor res in out_buffer:\n\t\t\t\t\t\t\t\tt = res.to_tsv()\n\t\t\t\t\t\t\t\tif t == '':\n\t\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t\tout_data += '%s\\r\\n' % res.to_tsv()\n\t\t\t\t\t\telif self.output_type == 'json':\n\t\t\t\t\t\t\tfor res in out_buffer:\n\t\t\t\t\t\t\t\tt = res.to_json()\n\t\t\t\t\t\t\t\tif len(t) == 2:\n\t\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t\tout_data += '%s\\r\\n' % t\n\t\t\t\t\t\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tout_data = '\\r\\n'.join(out_buffer)\n\n\t\t\t\t\t\tif self.out_file is not None:\n\t\t\t\t\t\t\twith open(self.out_file, 'a+', newline = '') as f:\n\t\t\t\t\t\t\t\tf.write(out_data)\n\t\t\t\t\t\t\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprint(out_data)\n\t\t\t\t\t\t\n\t\t\t\t\t\tif self.show_pbar is True:\n\t\t\t\t\t\t\tfor key in pbar:\n\t\t\t\t\t\t\t\tpbar[key].refresh()\n\t\t\t\t\t\t\n\t\t\t\t\t\tout_buffer = []\n\t\t\t\t\t\tout_data = ''\n\t\t\t\t\t\n\t\t\t\t\tif final_iter:\n\t\t\t\t\t\tasyncio.create_task(self.terminate())\n\t\t\t\t\t\treturn\n\t\t\t\t\t\n\t\t\t\t\ttry:\n\t\t\t\t\t\ter = await asyncio.wait_for(self.res_q.get(), timeout = 5)\n\t\t\t\t\texcept asyncio.TimeoutError:\n\t\t\t\t\t\tif self.show_pbar is True:\n\t\t\t\t\t\t\tfor key in pbar:\n\t\t\t\t\t\t\t\tpbar[key].refresh()\n\n\t\t\t\t\t\tif self.__total_finished == self.__total_targets and self.__gens_finished is True:\n\t\t\t\t\t\t\tfinal_iter = True\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t\n\t\t\t\t\tif er.status == EnumResultStatus.FINISHED:\n\t\t\t\t\t\tself.__total_finished += 1\n\t\t\t\t\t\tif self.show_pbar is True:\n\t\t\t\t\t\t\tpbar['targets'].update(1)\n\t\t\t\t\t\t\n\t\t\t\t\t\tobj = SMBProtocolEnumProgressResult(self.__total_targets, self.__total_finished, self.__gens_finished, er.target)\n\t\t\t\t\t\tif self.ext_result_q is not None:\n\t\t\t\t\t\t\tawait self.ext_result_q.put(SMBProtocolEnumResult(obj, 'progress'))\n\t\t\t\t\t\tout_buffer.append(SMBProtocolEnumResult(obj, 'progress'))\n\t\t\t\t\t\tif self.__total_finished == self.__total_targets and self.__gens_finished is True:\n\t\t\t\t\t\t\tfinal_iter = True\n\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\telif er.status == EnumResultStatus.RESULT:\n\t\t\t\t\t\tprotocol, result, sign_en, sign_req, rply, err = er.result\n\t\t\t\t\t\tif protocol == NegotiateDialects.WILDCARD:\n\t\t\t\t\t\t\tprotocol = 'SMB1' #replacing this bc of logic in connection\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprotocol = 
protocol.name.upper()\n\t\t\t\t\t\tif result is True:\n\t\t\t\t\t\t\tif sign_en is True:\n\t\t\t\t\t\t\t\tsign_en = 'E'\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tsign_en = 'D'\n\t\t\t\t\t\t\tif sign_req is True:\n\t\t\t\t\t\t\t\tsign_req = 'REQ'\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tsign_req = 'NOTREQ'\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tsign_en = None\n\t\t\t\t\t\t\tsign_req = None\n\t\t\t\t\t\t\n\t\t\t\t\t\tif self.show_pbar is True:\n\t\t\t\t\t\t\tif protocol == 'SMB1':\n\t\t\t\t\t\t\t\tpbar['smb1'].update(1)\n\t\t\t\t\t\t\tif sign_en == 'E':\n\t\t\t\t\t\t\t\tpbar['signenabled'].update(1)\n\t\t\t\t\t\t\tif sign_req == 'NOTREQ':\n\t\t\t\t\t\t\t\tpbar['signnotenforced'].update(1)\n\n\t\t\t\t\t\tdata = SMBProtocolEnumData(er.target_id, er.target, protocol, sign_en, sign_req, rply)\n\n\t\t\t\t\t\tif self.ext_result_q is not None:\n\t\t\t\t\t\t\tawait self.ext_result_q.put(SMBProtocolEnumResult(data, 'result'))\n\t\t\t\t\t\tout_buffer.append(SMBProtocolEnumResult(data, 'result'))\n\t\t\t\t\t\n\t\t\t\t\telif er.status == EnumResultStatus.ERROR:\n\t\t\t\t\t\tif self.ext_result_q is not None:\n\t\t\t\t\t\t\tawait self.ext_result_q.put(SMBProtocolEnumResult(er, 'error'))\n\t\t\t\t\t\tif self.show_pbar is True:\n\t\t\t\t\t\t\tpbar['connerrors'].update(1)\n\t\t\t\t\t\tout_buffer.append(SMBProtocolEnumResult(er, 'error'))\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\texcept asyncio.CancelledError:\n\t\t\t\t\treturn\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint(e)\n\n\t\texcept asyncio.CancelledError:\n\t\t\treturn\n\t\texcept Exception as e:\n\t\t\tlogger.exception('result_processing')\n\t\t\tasyncio.create_task(self.terminate())\n\n\tasync def terminate(self):\n\t\tfor worker in self.workers:\n\t\t\tworker.cancel()\n\t\tif self.result_processing_task is not None:\n\t\t\tself.result_processing_task.cancel()\n\n\tasync def setup(self):\n\t\ttry:\n\t\t\tif self.res_q is None:\n\t\t\t\tself.res_q = asyncio.Queue(self.worker_count)\n\t\t\t\tself.result_processing_task = asyncio.create_task(self.result_processing())\n\t\t\tif self.task_q is None:\n\t\t\t\tself.task_q = asyncio.Queue()\n\n\t\t\tfor _ in range(self.worker_count):\n\t\t\t\tself.workers.append(asyncio.create_task(self.worker()))\n\n\t\t\treturn True, None\n\t\texcept Exception as e:\n\t\t\treturn None, e\n\t\n\tasync def __generate_targets(self):\n\t\ttry:\t\t\n\t\t\tfor target_gen in self.target_gens:\n\t\t\t\tasync for uid, target, err in target_gen.generate():\n\t\t\t\t\tif err is not None:\n\t\t\t\t\t\tprint('Target gen error! %s' % err)\n\t\t\t\t\t\tbreak\n\t\t\t\t\t\n\t\t\t\t\tif target in self.exclude_target:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t\n\t\t\t\t\tself.__total_targets += 1\n\t\t\t\t\tawait self.task_q.put((uid, target))\n\t\t\t\t\tawait asyncio.sleep(0)\n\n\t\t\tself.__gens_finished = True\n\t\texcept Exception as e:\n\t\t\tlogger.exception('targetgen')\n\t\n\tasync def run(self):\n\t\ttry:\n\t\t\t_, err = await self.setup()\n\t\t\tif err is not None:\n\t\t\t\traise err\n\t\t\t\n\t\t\tgen_task = asyncio.create_task(self.__generate_targets())\n\n\t\t\tawait asyncio.gather(*self.workers)\n\t\t\tawait self.result_processing_task\n\t\t\treturn True, None\n\n\t\texcept Exception as e:\n\t\t\tlogger.exception('run')\n\t\t\treturn None, e\n\nasync def amain():\n\timport argparse\n\timport sys\n\n\tparser = argparse.ArgumentParser(description='SMB Protocol enumerator. 
Tells which dialects are supported by the remote end')\n\tparser.add_argument('-v', '--verbose', action='count', default=0)\n\tparser.add_argument('-w', '--smb-worker-count', type=int, default=100, help='Parallel count')\n\tparser.add_argument('-t', '--timeout', type=int, default=50, help='Timeout for each connection')\n\tparser.add_argument('--signing', action='store_true', help='Only check for the signing properties. (faster)')\n\tparser.add_argument('-s', '--stdin', action='store_true', help='Read targets from stdin')\n\tparser.add_argument('--json', action='store_true', help='Output in JSON format')\n\tparser.add_argument('--tsv', action='store_true', help='Output in TSV format. (TAB Separated Values)')\n\tparser.add_argument('--progress', action='store_true', help='Show progress bar')\n\tparser.add_argument('-o', '--out-file', help='Output file path.')\n\tparser.add_argument('targets', nargs='*', help = 'Hostname or IP address or file with a list of targets')\n\targs = parser.parse_args()\n\t\n\tif args.verbose >=1:\n\t\tlogger.setLevel(logging.INFO)\n\n\tif args.verbose > 2:\n\t\tlogger.setLevel(1) #enabling deep debug\n\t\tasyncio.get_event_loop().set_debug(True)\n\t\tlogging.basicConfig(level=logging.DEBUG)\n\t\n\toutput_type = 'str'\n\tif args.json is True:\n\t\toutput_type = 'json'\n\tif args.tsv is True:\n\t\toutput_type = 'tsv'\n\n\tenumerator = SMBProtocolEnum(worker_count = args.smb_worker_count, timeout = args.timeout, only_signing = args.signing, show_pbar=args.progress, out_file=args.out_file, output_type=output_type)\n\n\tnotfile = []\n\tif len(args.targets) == 0 and args.stdin is True:\n\t\tenumerator.target_gens.append(ListTargetGen(sys.stdin))\n\telse:\n\t\tfor target in args.targets:\n\t\t\ttry:\n\t\t\t\tf = open(target, 'r')\n\t\t\t\tf.close()\n\t\t\t\tenumerator.target_gens.append(FileTargetGen(target))\n\t\t\texcept:\n\t\t\t\tnotfile.append(target)\n\t\n\tif len(notfile) > 0:\n\t\tenumerator.target_gens.append(ListTargetGen(notfile))\n\n\tif len(enumerator.target_gens) == 0:\n\t\tprint('[-] No suitable targets were found!')\n\t\treturn\n\t\t\n\tawait enumerator.run()\n\tif args.progress is False:\n\t\tprint('[+] Done!')\n\ndef main():\n\tasyncio.run(amain())\n\nif __name__ == '__main__':\n\tmain()","sub_path":"aiosmb/examples/smbprotocolenum.py","file_name":"smbprotocolenum.py","file_ext":"py","file_size_in_byte":12507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"467820087","text":"import pickle\r\n\r\n\r\ndef make_data(op=0):\r\n    global binfile\r\n    if op == 0:\r\n        binfile = open(\"static/binary2.dat\", 'wb+')\r\n    else:\r\n        binfile = open(\"static/binary2.dat\", 'ab+')\r\n    n = int(input(\"enter number of records: \"))\r\n    print()\r\n    for i in range(n):\r\n        dict = {}\r\n        cityname = input(\"enter city name: \")\r\n        while 1:\r\n            try:\r\n                population = int(input(\"enter population of city\"))\r\n                if population < 0:\r\n                    print(\"negative number detected. invalid input.\")\r\n                    continue\r\n                break\r\n            except:\r\n                print(\"non-numeric input detected. please enter again.\")\r\n        while 1:\r\n            try:\r\n                hospitals = int(input(\"enter number of hospitals in city\"))\r\n                if hospitals < 0:\r\n                    print(\"negative number detected. invalid input.\")\r\n                    continue\r\n                break\r\n            except:\r\n                print(\"non-numeric input detected. please enter again.\")\r\n        while 1:\r\n            try:\r\n                schools = int(input(\"enter number of schools in city\"))\r\n                if schools < 0:\r\n                    print(\"negative number detected. 
invalid input.\")\r\n continue\r\n break\r\n except:\r\n print(\"non-numeric input detected. please enter again.\")\r\n while 1:\r\n try:\r\n density = int(input(\"enter city's population density: \"))\r\n if density < 0:\r\n print(\"negative number detected. invalid input.\")\r\n continue\r\n break\r\n except:\r\n print(\"non-numeric input detected. please enter again.\")\r\n dict['city_name'] = cityname\r\n dict['population'] = population\r\n dict['hospitals'] = hospitals\r\n dict['school'] = schools\r\n dict['density'] = density\r\n pickle.dump(dict, binfile)\r\n print()\r\n\r\n\r\ndef get_data():\r\n cityname = input(\"enter cityname : \")\r\n global binfile\r\n binfile.seek(0)\r\n while 1:\r\n try:\r\n dict = pickle.load(binfile)\r\n if dict['city_name'] == cityname:\r\n print(\"record found. record is\", dict)\r\n print()\r\n break\r\n except EOFError:\r\n print(\"record not found.\")\r\n print()\r\n break\r\n\r\n\r\ndef update_data():\r\n dict_lst = []\r\n binfile.seek(0)\r\n while True:\r\n try:\r\n dict = pickle.load(binfile)\r\n dict_lst.append(dict)\r\n except EOFError:\r\n break\r\n binfile.seek(0)\r\n for dict in dict_lst:\r\n if dict['density'] in range(500, 1001):\r\n dict['city_name'] = \"unknown\"\r\n pickle.dump(dict, binfile)\r\n\r\n\r\ndef update_rec():\r\n cityname = input(\"enter city name of record to be modified: \")\r\n global binfile\r\n flag = False\r\n binfile.seek(0)\r\n while 1:\r\n try:\r\n dict = pickle.load(binfile)\r\n if dict['city_name'] == cityname:\r\n flag = True\r\n break\r\n except EOFError:\r\n print(\"no such record.\")\r\n break\r\n if flag:\r\n field = input(\"enter field to be modified: \")\r\n if field in dict.keys():\r\n data = input(\"enter new value: \")\r\n dict_lst = []\r\n binfile.seek(0)\r\n while 1:\r\n try:\r\n temp = pickle.load(binfile)\r\n dict_lst.append(temp)\r\n except EOFError:\r\n break\r\n binfile.seek(0)\r\n for i in range(len(dict_lst)):\r\n if dict_lst[i] == dict:\r\n dict_lst[i][field] = data\r\n for x in dict_lst:\r\n pickle.dump(x, binfile)\r\n break\r\n binfile.close()\r\n else:\r\n print(\"no such field in record.\")\r\n\r\n\r\nwhile 1:\r\n op = int(input(\"\"\"what do you want to do?\r\n0.create a new file\r\n1.add data to existing file\r\ninput=\"\"\"))\r\n if op in [0, 1]:\r\n break\r\n else:\r\n print(\"invalid input. accepted inputs are 0 and 1.\")\r\nmake_data(op)\r\nget_data()\r\nupdate_data()\r\nupdate_rec()\r\n","sub_path":"Class 12/File_Handling/Binary files/binary_2.py","file_name":"binary_2.py","file_ext":"py","file_size_in_byte":4323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"373600407","text":"import os\nimport time\nimport unicodedata\n\nfrom django.conf import settings\nfrom django.contrib.sites.models import Site\nfrom hashlib import sha512\n\n\nOGONE_PSPID = os.environ.get(\"OGONE_PSPID_CONFERENCE\")\nOGONE_SALT = os.environ.get(\"OGONE_SALT\")\nOGONE_URL = os.environ.get(\"OGONE_URL\")\n\n\ndef get_absolute_uri():\n protocol = \"https://\" if settings.SESSION_COOKIE_SECURE else \"http://\"\n return protocol + Site.objects.get_current().domain\n\n\ndef process_ogone_parameters(parameters, user):\n \"\"\"\n This method checks if a minimum of parameters have been received,\n and then processes them: reorder parameters and include a SHA512 key built with all\n parameters. 
It also adds some common parameters, like language or currency.\n    \"\"\"\n    # Common parameters\n    logo = \"https://gallery.mailchimp.com/3d4635dfec992c8c47c666ef5/images/51c538e3-2c1b-4a15-a6a3-6922e622fd85.png\"\n    ogone_parameters = {\n        \"CURRENCY\": \"EUR\",\n        \"LANGUAGE\": \"en_US\",\n        \"BGCOLOR\": \"#f5f5f5\",\n        \"TXTCOLOR\": \"#222\",\n        \"LOGO\": logo,\n    }\n    # User parameters\n    ogone_parameters.update(\n        {\n            \"EMAIL\": user.email,\n            \"CN\": unicodedata.normalize(\"NFKD\", user.profile.name).encode(\"ascii\", \"ignore\").decode(\"utf-8\").upper(),\n        }\n    )\n    # Required parameters\n    absolute_uri = get_absolute_uri()\n    ogone_parameters.update(\n        {\n            \"PSPID\": parameters[\"PSPID\"],\n            \"ORDERID\": str(parameters[\"ORDERID\"]) + \"/\" + str(int(time.time())),\n            \"AMOUNT\": (parameters[\"AMOUNT\"] * 100),\n            \"COM\": \"ID\" + str(parameters[\"ORDERID\"]),\n            \"ACCEPTURL\": absolute_uri + parameters[\"RESULTURL\"],\n            \"DECLINEURL\": absolute_uri + parameters[\"RESULTURL\"],\n        }\n    )\n    # Generate SHA512 with sorted parameters\n    string_to_hash = \"\"\n    for key in sorted(ogone_parameters):\n        string_to_hash += key + \"=\" + str(ogone_parameters[key]) + OGONE_SALT\n\n    ogone_parameters.update({\"SHASIGN\": sha512(string_to_hash.encode(\"utf-8\")).hexdigest().upper()})\n    return ogone_parameters\n\n\nclass Ogone(object):\n    SUCCESS_STATUSES = (\"5\", \"51\", \"9\", \"91\")\n    EXCEPTION_STATUSES = (\"52\", \"92\")\n    DECLINE_STATUSES = \"2\"\n    CANCEL_STATUSES = \"1\"\n    INVALID_STATUSES = \"0\"\n","sub_path":"hipeac/tools/payments/legacy.py","file_name":"legacy.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"184059648","text":"# -*- coding: utf-8 -*-\nfrom selenium.webdriver.support.wait import WebDriverWait as wait\nimport time, pytest\nfrom selenium.webdriver.common.action_chains import ActionChains\n\n\n@pytest.mark.parametrize(\"locator,url\", [\n    (\"//ul[@id='main-menu']//a[.='Обзоры']\", \"article/2/list\"),\n    (\"//ul[@id='main-menu']//a[.='Профессия']\", \"article/9/list\"),\n    (\"//ul[@id='main-menu']//a[.='Слова']\", \"article/8/list\"),\n    (\"//ul[@id='main-menu']//a[.='Мнение']\", \"article/7/list\"),\n    (\"//ul[@id='main-menu']//a[.='Техника']\", \"article/10/list\"),\n    (\"//ul[@id='main-menu']//a[.='Практика']\", \"article/11/list\"),\n    (\"//ul[@id='main-menu']//a[.='Репортаж']\", \"article/12/list\"),\n    (\"//ul[@id='main-menu']//a[.='Инфографика']\", \"article/13/list\"),\n    (\"//ul[@id='main-menu']//a[.='Правила профессии']\", \"article/16/list\"),\n    #(\"//ul[@id='main-menu']//a[.='Тест']\", \"article/17/list\"),\n    ])\ndef test_articles_dropdown_navigation(app, locator, url):\n    wd = app.wd\n    actions = ActionChains(wd)\n    app.open_page(\"\") # home page\n    time.sleep(2)\n    menu = wd.find_element_by_xpath(\"//ul[@id='main-menu']//a[.='Статьи']\")\n    actions.move_to_element(menu).perform()\n    wait(wd, 10).until(lambda s: wd.find_element_by_xpath(str(locator)))\n    wd.find_element_by_xpath(str(locator)).click()\n    wait(wd, 10).until(lambda s: wd.find_element_by_xpath(\"//h1\"))\n    assert wd.current_url == app.base_url + url","sub_path":"tests/articles_links_navigation.py","file_name":"articles_links_navigation.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"363111841","text":"import sqlite3\n\ndef create_sample():\n    # Connect to the DB\n    conn = sqlite3.connect('sqlitefile.db')\n    # → if the file does not exist, it is created automatically\n\n    # Create a cursor\n    cur = 
conn.cursor()\n\n    # Issue SQL\n    # drop table\n    cur.execute(\n        'DROP TABLE IF EXISTS log_counts;'\n    )\n    # create table\n    cur.execute(\n        'CREATE TABLE IF NOT EXISTS log_counts ' +\n        '( ' +\n        'dataset_name STR NOT NULL, ' +\n        'table_name STR NOT NULL, ' +\n        'y INT NOT NULL, ' +\n        'm INT NOT NULL, ' +\n        'd INT NOT NULL, ' +\n        'h INT NOT NULL, ' +\n        'cnt INT NOT NULL' +\n        ');'\n    )\n\n    # Disconnect from the DB\n    conn.close()\n\n    return None\n\n\ndef insert_sample():\n    # Connect to the DB\n    conn = sqlite3.connect('sqlitefile.db')\n\n    # Create a cursor\n    cur = conn.cursor()\n\n    # Data to insert\n    log_recs = [\n        ('ninjatribes_log', 'active_', 2020, 10, 31, 15, 7739),\n        ('ninjatribes_log', 'active_', 2020, 10, 31, 16, 5548),\n        ('ninjatribes_log', 'active_', 2020, 10, 31, 17, 3852),\n        ('ninjatribes_log', 'active_', 2020, 10, 31, 18, 2718)\n    ]\n\n    # insert\n    cur.executemany(\n        'INSERT INTO log_counts values (?,?,?,?,?,?,?);',\n        log_recs\n    )\n\n    # commit\n    conn.commit()\n\n    # Disconnect from the DB\n    conn.close()\n\n    return None\n\n\ndef select_sample():\n    # Connect to the DB\n    conn = sqlite3.connect('sqlitefile.db')\n    # → if the file does not exist, it is created automatically\n\n    # Create a cursor\n    cur = conn.cursor()\n\n    # select\n    table = cur.execute('select * from log_counts')\n    recs = table.fetchall()\n    for rec in recs:\n        print(rec)\n        print(type(rec)) # each row is a tuple\n        print(f'{rec[2]}-{rec[3]}-{rec[4]}({rec[5]}): {rec[6]}')\n\n    # Disconnect from the DB\n    conn.close()\n\n    return None\n\n\nif __name__=='__main__':\n    # Try toggling the commented-out calls below\n    #create_sample()\n    #insert_sample()\n    select_sample()\n","sub_path":"sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"84354694","text":"import torch\nimport torch.nn as nn\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\nimport pandas as pd \nimport numpy as np \nfrom datetime import datetime\nfrom scipy.stats.mstats import gmean\nfrom dateutil.relativedelta import relativedelta\nimport os\n\nfrom server.models.portfolio.stats import *\nfrom server.models.portfolio.cost import costs\nfrom server.models.portfolio.tiingo import get_data\nfrom server.models.portfolio.optimize import optimize\nfrom server.models.portfolio.bl import bl, get_mkt_cap\nfrom server.models.portfolio.rs import fama_french, regime_switch, current_regime, business_days, expected_returns, covariance\n\n\n\nclass RNN(nn.Module):\n\n\tdef __init__(self, embed_size,\n\t\t\t\t num_output,\n\t\t\t\t rnn_model = 'GRU',\n\t\t\t\t use_last = True,\n\t\t\t\t padding_index = 0,\n\t\t\t\t hidden_size = 64,\n\t\t\t\t num_layers = 1,\n\t\t\t\t batch_first = True):\n\n\t\tsuper(RNN, self).__init__()\n\t\tself.use_last = use_last\n\t\tself.drop_en = nn.Dropout(p = 0.6)\n\n\t\tself.end_date = datetime.now().strftime(\"%Y-%m-%d\")\n\t\tself.start_date = (datetime.strptime(self.end_date, \"%Y-%m-%d\") - relativedelta(years=6)).strftime(\"%Y-%m-%d\")\n\t\tself.tickers = list(pd.read_csv(os.getcwd() + r'/data/tickers.csv')['Tickers'])\n\t\t\n\t\tif rnn_model == 'LSTM':\n\t\t\tself.rnn = nn.LSTM(input_size = embed_size, hidden_size = hidden_size,\n\t\t\t\t\t\t\t num_layers = num_layers, dropout = 0.5,\n\t\t\t\t\t\t\t batch_first = True, bidirectional = False)\n\t\telif rnn_model == 'GRU':\n\t\t\tself.rnn = nn.GRU(input_size = embed_size, hidden_size = hidden_size,\n\t\t\t\t\t\t\t num_layers = num_layers, dropout = 0.5,\n\t\t\t\t\t\t\t batch_first = True, bidirectional = False)\n\n\t\tself.bn2 = nn.BatchNorm1d(int(hidden_size))\n\t\tself.fc = nn.Linear(int(hidden_size), 
int(num_output))\n\n\n\tdef forward(self, x):\n\t\t#x_embed = self.drop_en(x)\n\t\t#x_embed = nn.functional.dropout(x)\n\t\tx_embed = x.view(28, x.shape[1], -1)\n\t\t#packed_input = pack_padded_sequence(x_embed, seq_lengths.cpu().numpy(), batch_first = True)\n\t\tx_embed = x_embed.type(torch.FloatTensor)\n\t\tpacked_output, ht = self.rnn(x_embed, None)\n\t\t#out_rnn, _ = pad_packed_sequence(packed_output, batch_first = True)\n\n\t\t#row_indices = torch.arange(0, x.size(0)).long()\n\t\t#col_indices = seq_lengths - 1\n\t\t#if next(self.parameters()).is_cuda():\n\t\t#\trow_indices = row_indices.cuda()\n\t\t\t#col_indices = col_indices.cuda()\n\t\t#if self.use_last:\n\t\t\t#last_tensor = out_rnn[row_indices, col_indices, :]\n\t\t\t#last_tensor = packed_output[row_indices, :]\n\t\t#else:\n\t\t\t#last_tensor = out_rnn[row_indices, :, :]\n\t\t\t#last_tensor = packed_output[row_indices, :]\n\t\t\t#last_tensor = torch.mean(last_tensor, dim = 1)\n#change labels to predict returns from stock price, but output mu_ml (do this in run_optimization - move it outside)\n\t\tfc_input = self.bn2(packed_output[-1].view(x.shape[1], -1))\n\t\tout = self.fc(fc_input)\n\t\t#out = self.run_optimization(self.end_date, self.start_date, out)\n\t\treturn out.view(-1)\n\n\n\tdef run_optimization(self, end_date, start_date, mu_ml):\n\t\trebalance_date = (datetime.strptime(end_date, \"%Y-%m-%d\") + relativedelta(months=6, days=1)).strftime(\"%Y-%m-%d\")\n\t\trebalance_date = datetime.strftime(pd.bdate_range(end_date, rebalance_date)[-1], \"%Y-%m-%d\")\n\t\tprices = get_data(self.tickers, 'adjClose', start_date, end_date, save=False)\n\t\tfactors = fama_french(start_date, end_date, save=False)\n\t\treturns = (prices / prices.shift(1) - 1).dropna()[:len(factors)]\n\t\tR = returns.values\n\t\t\n\t\t## *********************************************************************************************************************\n\t\t# factor model\n\t\t## *********************************************************************************************************************\n\n\t\tfactors.drop('RF', axis=1, inplace=True)\n\t\tF = np.hstack((np.atleast_2d(np.ones(factors.shape[0])).T, factors))\n\t\ttransmat, loadings, covarainces = regime_switch(R, F, self.tickers)\n\t\tbaseline = 30\n\t\tregime = current_regime(R, F, loadings, baseline)\n\n\n\t\t# get the number of days until the next scheduled rebalance\n\t\tdays = business_days((datetime.strptime(end_date, \"%Y-%m-%d\") + relativedelta(days=1)).strftime(\"%Y-%m-%d\"), rebalance_date)\n\n\t\t# get the estimate returns and covariances from the factor model\n\t\tmu_rsfm = pd.DataFrame(days * expected_returns(F, transmat, loadings, regime), index=self.tickers, columns=['returns'])\n\t\tcov_rsfm = pd.DataFrame(days * covariance(R, F, transmat, loadings, covarainces, regime), index=self.tickers, columns=self.tickers)\n\n\t\t# write estimates to a csv file\n\t\tmu_rsfm.to_csv(os.getcwd() + r'/data/mu_rsfm.csv')\n\t\tcov_rsfm.to_csv(os.getcwd() + r'/data/cov_rsfm.csv')\n\t\tmktcap = get_mkt_cap(self.tickers, save=True)\n\n\t\t# calculate the market coefficient\n\t\tl = (gmean(factors.iloc[-days:,:]['MKT'] + 1,axis=0) - 1)/factors.iloc[-days:,:]['MKT'].var()\n\n\t\tmu_bl1, cov_bl1 = bl(tickers=self.tickers,\n\t\t l=l, tau=1,\n\t\t mktcap=mktcap,\n\t\t Sigma=returns.iloc[-days:,:].cov().values * days,\n\t\t P=np.identity(len(self.tickers)),\n\t\t Omega=np.diag(np.diag(cov_rsfm)),\n\t\t q=mu_rsfm.values,\n\t\t adjust=False)\n\n\t\t#mu_ml = mu_bl1.mul(pd.DataFrame(1 + np.random.uniform(-0.05, 0.1, 
len(tickers)), index=mu_bl1.index, columns=mu_bl1.columns))\n\t\tmu_ml = pd.DataFrame(mu_ml, columns = ['returns'])\n\t\tmu_bl2, cov_bl2 = bl(tickers=self.tickers,\n\t\t l=l, tau=1,\n\t\t mktcap=mktcap,\n\t\t Sigma=returns.iloc[-days:,:].cov().values * days,\n\t\t P=np.identity(len(self.tickers)),\n\t\t Omega=np.diag(np.diag(cov_rsfm)),\n\t\t q=mu_ml.values,\n\t\t adjust=True)\n\n\t\tcost = costs(tickers=self.tickers,\n\t\t cov=cov_rsfm,\n\t\t prices=prices.iloc[-2, :] if prices.iloc[-1, :].isnull().values.any() else prices.iloc[-1, :],\n\t\t start_date=(datetime.strptime(end_date, \"%Y-%m-%d\") - relativedelta(years=1)).strftime(\"%Y-%m-%d\"),\n\t\t end_date=end_date,\n\t\t alpha=5)\n\n\t\trisk_tolerance = [((1, 10), (0, 0.10)),\n\t ((5, 5), (0, 0.20)),\n\t ((10, 1), (-0.05, 0.30))]\n\n\t\tsoln = optimize(mu = (mu_bl1.values.ravel(), mu_bl2.values.ravel()),\n\t\t sigma = (cov_bl1.values, cov_bl2.values),\n\t\t alpha = (0.05, 0.10),\n\t\t return_target = (0.05, 0.05),\n\t\t costs = cost,\n\t\t prices = prices.iloc[-2, :].values if prices.iloc[-1, :].isnull().values.any() else prices.iloc[-1, :].values,\n\t\t gamma = risk_tolerance[2])\n\n\t\tx1 = pd.DataFrame(soln.x[:int(len(mu_bl1))], index=mu_bl1.index, columns=['weight'])\n\t\tx2 = pd.DataFrame(soln.x[int(len(mu_bl2)):], index=mu_bl2.index, columns=['weight'])\n\t\tprint('\\n\\n********************************************************************')\n\t\tprint('\\tperiod one results')\n\t\tprint('********************************************************************\\n')\n\n\t\t#print(x1)\n\n\t\tprint(\"\\nportfolio return: %f\" % (ret(mu_bl1, x1) * 100))\n\t\tprint(\"portfolio volatility: %f\" % (vol(cov_bl1, x1) * 100))\n\t\tprint(\"portfolio var%f: %f\" % (1-0.05, var(mu_bl1, cov_bl1, 0.05, x1)))\n\t\tprint(\"portfolio cvar%f: %f\" % (1-0.05, cvar(mu_bl1, cov_bl1, 0.05, x1)))\n\n\n\t\tprint('\\n\\n********************************************************************')\n\t\tprint('\\tperiod two results')\n\t\tprint('********************************************************************\\n')\n\n\t\t#print(x2)\n\n\t\tprint(\"\\nportfolio return: %f\" % (ret(mu_bl2, x1) * 100))\n\t\tprint(\"portfolio volatility: %f\" % (vol(cov_bl2, x1) * 100))\n\t\tprint(\"portfolio var%f: %f\" % (1-0.05, var(mu_bl2, cov_bl2, 0.05, x1)))\n\t\tprint(\"portfolio cvar%f: %f\" % (1-0.05, cvar(mu_bl2, cov_bl2, 0.05, x1)))\n\n\t\treturn (ret(mu_bl2, x1) * 100)\n\n\n","sub_path":"server/models/portfolio/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":7778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"302448198","text":"# Import module (Regular Expression).\nimport re\n\n# Get sentence from user.\noriginal = input(\"Please enter a sentence: \").strip().lower()\n\n# Split up sentence into words.\nwords = original.split()\n\n# Create list for new words.\nnew_words = []\n\n# Loop through words and convert to pig latin.\nfor word in words:\n\n # If the word starts with a vowel,\n # we simply add \"yay\" to the end.\n if word[0] in \"aeiou\":\n new_word = word + \"yay\"\n new_words.append(new_word)\n\n # If the word starts with a consonant,\n # we slice the word at the first vowel.\n else:\n vowel_position = 0\n for letter in word:\n if letter not in \"aeiou\":\n vowel_position += 1\n else:\n break\n\n # Slice and create new word.\n beginning = word[:vowel_position]\n the_rest = word[vowel_position:]\n new_word = the_rest + beginning + \"ay\"\n\n # Add new word to list.\n 
new_words.append(new_word)\n\n# Stick the new words together.\nnew_sentence = \" \".join(new_words)\n\n# Remove punctuation from new sentence.\nnew_sentence = re.sub(\"[\\.\\?\\!\\,]\", \"\", new_sentence)\n\n# Capitalize first word and add period to the end.\noutput = new_sentence.capitalize() + \".\"\n\n# Print output.\nprint(output)\n","sub_path":"pig_latin_translator/pig_latin_translator.py","file_name":"pig_latin_translator.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"505302943","text":"#!/usr/bin/python3\n'''\n    This module handles all default RESTful API actions for Amenity objects.\n'''\nfrom flask import jsonify, abort, request\nfrom api.v1.views import app_views\nfrom models import storage\nfrom models import Amenity\n\n\n@app_views.route('/amenities', methods=['GET'],\n                 strict_slashes=False)\ndef all_amenities_by_state():\n    '''\n    Retrieves all Amenity objects from storage\n    '''\n    amenities = storage.all(\"Amenity\")\n    amenities_list = []\n    for k, v in amenities.items():\n        amenities_list.append(v.to_dict())\n\n    return jsonify(amenities_list)\n\n\n@app_views.route('/amenities/<amenity_id>', methods=['GET'],\n                 strict_slashes=False)\ndef amenity_by_id(amenity_id):\n    '''\n    Retrieves a specified Amenity object from storage\n    '''\n    amenity = storage.get(\"Amenity\", amenity_id)\n    if not amenity:\n        abort(404)\n    format_amenity = amenity.to_dict()\n    return jsonify(format_amenity)\n\n\n@app_views.route('/amenities/<amenity_id>', methods=['DELETE'],\n                 strict_slashes=False)\ndef delete_amenity(amenity_id):\n    '''\n    Deletes a specified Amenity object from storage\n    '''\n    amenity = storage.get(\"Amenity\", amenity_id)\n    if not amenity:\n        abort(404)\n    storage.delete(amenity)\n    storage.save()\n    return jsonify({}), 200\n\n\n@app_views.route('/amenities', methods=['POST'],\n                 strict_slashes=False)\ndef create_amenity():\n    '''\n    Creates a new Amenity object and saves it to storage\n    '''\n    if not request.json:\n        return jsonify({\"error\": \"Not a JSON\"}), 400\n    else:\n        amenity_dict = request.get_json()\n        if \"name\" in amenity_dict:\n            amenity_name = amenity_dict[\"name\"]\n            amenity = Amenity(name=amenity_name)\n            for k, v in amenity_dict.items():\n                setattr(amenity, k, v)\n            amenity.save()\n            return jsonify(amenity.to_dict()), 201\n        else:\n            return jsonify({\"error\": \"Missing name\"}), 400\n\n\n@app_views.route('/amenities/<amenity_id>', methods=['PUT'],\n                 strict_slashes=False)\ndef update_amenity(amenity_id):\n    '''\n    Updates an existing Amenity object and saves it to storage\n    '''\n    amenity = storage.get(\"Amenity\", amenity_id)\n    if not amenity:\n        abort(404)\n    if not request.json:\n        return jsonify({\"error\": \"Not a JSON\"}), 400\n\n    req = request.get_json()\n    for k, v in req.items():\n        if k not in (\"id\", \"created_at\", \"updated_at\"):\n            setattr(amenity, k, v)\n    amenity.save()\n\n    return jsonify(amenity.to_dict()), 200\n","sub_path":"api/v1/views/amenities.py","file_name":"amenities.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"523430751","text":"# -*- coding: utf-8 -*-\n#crawled 1 page as of now.need to crawl 2 more pages.--------------------\nimport scrapy\nfrom scrapy.spiders import CrawlSpider\nfrom scrapy.http import TextResponse,Request\nfrom urllib.parse import urljoin\n\nclass TripadvisorSpider(scrapy.Spider):\n    name = 'tripadvisor'\n    allowed_domains = ['tripadvisor.in']\n    #start_urls = 
['https://www.tripadvisor.in/Restaurants-g304552-Shimla_Shimla_District_Himachal_Pradesh.html']\n\t#//*[@id=\"taplc_restaurants_coverpage_content_0\"]/div[1]/div[2]/div/div[2]/div[3]/div/a/div/div/div/img\n\t#//*[@id=\"taplc_restaurants_coverpage_content_0\"]/div[1]/div[1]/div/div[2]/div[3]/div/a/div/div/div/img\n\t#//img/@src----earlier\n\t#//*[@class=\"poi\"]//a/@href\n\t\n    def start_requests(self):\n        url='https://www.tripadvisor.in/Restaurants-g304552-Shimla_Shimla_District_Himachal_Pradesh.html'\n        yield scrapy.Request(url=url,callback=self.parse)\n\t\t\n    def parse(self, response):\n        l=response.xpath('//*[@class=\"title\"]/a/@href').extract()\n        print(l)\n\t\t\n        #print(\"------------------------------------------\")\n        urls=[]\n        for image in l:\n            print(image)\n            urls.append(image)\n        #print(\"------------------------------------------\")\n\t\t#to fetch all the links present(all data with 'href' tag)\n        \n        for i in urls:\n            #print (response.urljoin(i))\n            yield response.follow(response.urljoin(i),self.parse_about)\n\n    def parse_about(self, response):\n        def extract_with_css(query):\n            return response.css(query).extract()\n        \n\t\t#this function will fetch the content ,date and title of the headline\n        res={\n            'name':extract_with_css('.heading_title::text'),\n            #'rating':response.xpath('//*[@id=\"taplc_location_detail_header_restaurants_0\"]/div[1]/span[1]/div/div/span').extract_first()\n            #'rating':extract_with_css('.ui_bubble_rating_45')\n\t\t\t 'review':''.join(extract_with_css('.partial_entry::text')),\n\t\t\t #''.join(review)\n            #'rating':response.xpath('//div//span [contains(@class,\"ui_bubble_rating\")]')\n            #'Rating': response.xpath('//div[@class=\"claim false\"]/span//text()').extract_first()\n        }\n        yield (res)","sub_path":"tripadvisor.py","file_name":"tripadvisor.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"385350299","text":"# Python 3.x\n\nimport re\nfrom aoc_utilities import *\nDAY = 20\n\n# Part 1\n\nteststr = \"\"\"p=< 3,0,0>, v=< 2,0,0>, a=<-1,0,0>\np=< 4,0,0>, v=< 0,0,0>, a=<-2,0,0>\"\"\"\n\n\ndef solve1(lines):\n    idmin = 1\n    min = 10000\n    for i, line in enumerate(lines):\n        parts = line.split('a=<')\n        if len(parts)>1:\n            a = [int(x) for x in re.findall(r'\\d+', parts[1])]\n            metric = sum(map(abs, a))\n            if metric < min:\n                idmin = i\n                min = metric\n                print(min)\n                print(\"new leader @ metric {} for line #{} : {}\".format(metric, i, line))\n\n            elif metric == min:\n                print(\"equality @ metric {} for line #{} : {}\".format(metric, i, line))\n    return idmin\n\n\n# assert(solve1(teststr.splitlines()) == 1)\n\n\nres = solve1((Input(DAY).readlines()))\nprint(res)\n\n\n\n# def solve2(input):\n#     pass\n#\n# res = solve2(parse_words(Input(DAY).readline()))\n# print(res)\n","sub_path":"day20.py","file_name":"day20.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"456858285","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport blib, re\nimport pywikibot\nfrom arabiclib import reorder_shadda\n\ndef process_page(page, index, refrom, reto, pagetitle_sub, comment, lang_only,\n    warn_on_no_replacement, verbose, do_reorder_shadda):\n  pagetitle = str(page.title())\n  def pagemsg(txt):\n    blib.msg(\"Page %s %s: %s\" % (index, pagetitle, txt))\n  if verbose:\n    blib.msg(\"Processing %s\" % pagetitle)\n  #blib.msg(\"From: [[%s]], To: [[%s]]\" % (refrom, reto))\n  text = str(page.text)\n  origtext = text\n  if 
do_reorder_shadda:\n text = reorder_shadda(text)\n zipped_fromto = list(zip(refrom, reto))\n def replace_text(text):\n for fromval, toval in zipped_fromto:\n if pagetitle_sub:\n fromval = fromval.replace(pagetitle_sub, re.escape(pagetitle))\n toval = toval.replace(pagetitle_sub, pagetitle)\n text = re.sub(fromval, toval, text, 0, re.M)\n return text\n if not lang_only:\n text = replace_text(text)\n else:\n sec_to_replace = None\n foundlang = False\n sections = re.split(\"(^==[^=]*==\\n)\", text, 0, re.M)\n\n for j in range(2, len(sections), 2):\n if sections[j-1] == \"==%s==\\n\" % lang_only:\n if foundlang:\n pagemsg(\"WARNING: Found multiple %s sections, skipping page\" % lang_only)\n if warn_on_no_replacement:\n pagemsg(\"WARNING: No replacements made\")\n return\n foundlang = True\n sec_to_replace = j\n break\n\n if sec_to_replace is None:\n if warn_on_no_replacement:\n pagemsg(\"WARNING: No replacements made\")\n return\n sections[sec_to_replace] = replace_text(sections[sec_to_replace])\n text = \"\".join(sections)\n if warn_on_no_replacement and text == origtext:\n pagemsg(\"WARNING: No replacements made\")\n return text, comment or \"replace %s\" % (\", \".join(\"%s -> %s\" % (f, t) for f, t in zipped_fromto))\n\npa = blib.create_argparser(\"Search and replace on pages\", include_pagefile=True)\npa.add_argument(\"-f\", \"--from\", help=\"From regex, can be specified multiple times\",\n metavar=\"FROM\", dest=\"from_\", required=True, action=\"append\")\npa.add_argument(\"-t\", \"--to\", help=\"To regex, can be specified multiple times\",\n required=True, action=\"append\")\npa.add_argument(\"--comment\", help=\"Specify the change comment to use\")\npa.add_argument('--pagetitle', help=\"Value to substitute page title with\")\npa.add_argument('--lang-only', help=\"Only replace in the specified language section\")\npa.add_argument('--reorder-shadda', help=\"Reorder shadda + short vowel to fix Unicode bug\")\npa.add_argument('--warn-on-no-replacement', action=\"store_true\",\n help=\"Warn if no replacements made\")\nargs = pa.parse_args()\nstart, end = blib.parse_start_end(args.start, args.end)\n\nfrom_ = list(args.from_)\nto = list(args.to)\n\nif len(from_) != len(to):\n raise ValueError(\"Same number of --from and --to arguments must be specified\")\n\ndef do_process_page(page, index, parsed):\n return process_page(page, index, from_, to, args.pagetitle, args.comment, args.lang_only,\n args.warn_on_no_replacement, args.verbose, args.reorder_shadda)\nblib.do_pagefile_cats_refs(args, start, end, do_process_page, edit=True)\n","sub_path":"rewrite.py","file_name":"rewrite.py","file_ext":"py","file_size_in_byte":3141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"189147496","text":"# -*- coding: utf-8 -*-\n# Copyright (c) 2020, Frappe Technologies Pvt. Ltd. 
and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nimport json\n\nimport frappe\nfrom frappe.model.document import Document\nfrom frappe import _\nfrom frappe.utils import flt\n\nfrom verp import get_company_currency\nfrom verp.accounts.utils import get_balance_on\nfrom verp.accounts.report.bank_reconciliation_statement.bank_reconciliation_statement import get_entries, get_amounts_not_reflected_in_system\nfrom verp.accounts.doctype.bank_transaction.bank_transaction import get_paid_amount\n\n\nclass BankReconciliationTool(Document):\n\tpass\n\n@frappe.whitelist()\ndef get_bank_transactions(bank_account, from_date = None, to_date = None):\n\t# returns bank transactions for a bank account\n\tfilters = []\n\tfilters.append(['bank_account', '=', bank_account])\n\tfilters.append(['docstatus', '=', 1])\n\tfilters.append(['unallocated_amount', '>', 0])\n\tif to_date:\n\t\tfilters.append(['date', '<=', to_date])\n\tif from_date:\n\t\tfilters.append(['date', '>=', from_date])\n\ttransactions = frappe.get_all(\n\t\t'Bank Transaction',\n\t\tfields = ['date', 'deposit', 'withdrawal', 'currency',\n\t\t'description', 'name', 'bank_account', 'company',\n\t\t'unallocated_amount', 'reference_number', 'party_type', 'party'],\n\t\tfilters = filters\n\t)\n\treturn transactions\n\n@frappe.whitelist()\ndef get_account_balance(bank_account, till_date):\n\t# returns account balance till the specified date\n\taccount = frappe.db.get_value('Bank Account', bank_account, 'account')\n\tfilters = frappe._dict({\n\t\t\"account\": account,\n\t\t\"report_date\": till_date,\n\t\t\"include_pos_transactions\": 1\n\t})\n\tdata = get_entries(filters)\n\n\tbalance_as_per_system = get_balance_on(filters[\"account\"], filters[\"report_date\"])\n\n\ttotal_debit, total_credit = 0,0\n\tfor d in data:\n\t\ttotal_debit += flt(d.debit)\n\t\ttotal_credit += flt(d.credit)\n\n\tamounts_not_reflected_in_system = get_amounts_not_reflected_in_system(filters)\n\n\tbank_bal = flt(balance_as_per_system) - flt(total_debit) + flt(total_credit) \\\n\t\t+ amounts_not_reflected_in_system\n\n\treturn bank_bal\n\n\n@frappe.whitelist()\ndef update_bank_transaction(bank_transaction_name, reference_number, party_type=None, party=None):\n\t# updates bank transaction based on the new parameters provided by the user from Vouchers\n\tbank_transaction = frappe.get_doc(\"Bank Transaction\", bank_transaction_name)\n\tbank_transaction.reference_number = reference_number\n\tbank_transaction.party_type = party_type\n\tbank_transaction.party = party\n\tbank_transaction.save()\n\treturn frappe.db.get_all('Bank Transaction',\n\t\tfilters={\n\t\t\t'name': bank_transaction_name\n\t\t},\n\t\tfields=['date', 'deposit', 'withdrawal', 'currency',\n\t\t\t'description', 'name', 'bank_account', 'company',\n\t\t\t'unallocated_amount', 'reference_number',\n\t\t\t 'party_type', 'party'],\n\t)[0]\n\n\n@frappe.whitelist()\ndef create_journal_entry_bts( bank_transaction_name, reference_number=None, reference_date=None, posting_date=None, entry_type=None,\n\tsecond_account=None, mode_of_payment=None, party_type=None, party=None, allow_edit=None):\n\t# Create a new journal entry based on the bank transaction\n\tbank_transaction = frappe.db.get_values(\n\t\t\"Bank Transaction\", bank_transaction_name,\n\t\tfieldname=[\"name\", \"deposit\", \"withdrawal\", \"bank_account\"] ,\n\t\tas_dict=True\n\t)[0]\n\tcompany_account = frappe.get_value(\"Bank Account\", bank_transaction.bank_account, \"account\")\n\taccount_type = 
frappe.db.get_value(\"Account\", second_account, \"account_type\")\n\tif account_type in [\"Receivable\", \"Payable\"]:\n\t\tif not (party_type and party):\n\t\t\tfrappe.throw(_(\"Party Type and Party is required for Receivable / Payable account {0}\").format( second_account))\n\taccounts = []\n\t# Multi Currency?\n\taccounts.append({\n\t\t\t\"account\": second_account,\n\t\t\t\"credit_in_account_currency\": bank_transaction.deposit\n\t\t\t\tif bank_transaction.deposit > 0\n\t\t\t\telse 0,\n\t\t\t\"debit_in_account_currency\":bank_transaction.withdrawal\n\t\t\t\tif bank_transaction.withdrawal > 0\n\t\t\t\telse 0,\n\t\t\t\"party_type\":party_type,\n\t\t\t\"party\":party,\n\t\t})\n\n\taccounts.append({\n\t\t\t\"account\": company_account,\n\t\t\t\"bank_account\": bank_transaction.bank_account,\n\t\t\t\"credit_in_account_currency\": bank_transaction.withdrawal\n\t\t\t\tif bank_transaction.withdrawal > 0\n\t\t\t\telse 0,\n\t\t\t\"debit_in_account_currency\":bank_transaction.deposit\n\t\t\t\tif bank_transaction.deposit > 0\n\t\t\t\telse 0,\n\t\t})\n\n\tcompany = frappe.get_value(\"Account\", company_account, \"company\")\n\n\tjournal_entry_dict = {\n\t\t\"voucher_type\" : entry_type,\n\t\t\"company\" : company,\n\t\t\"posting_date\" : posting_date,\n\t\t\"cheque_date\" : reference_date,\n\t\t\"cheque_no\" : reference_number,\n\t\t\"mode_of_payment\" : mode_of_payment\n\t}\n\tjournal_entry = frappe.new_doc('Journal Entry')\n\tjournal_entry.update(journal_entry_dict)\n\tjournal_entry.set(\"accounts\", accounts)\n\n\n\tif allow_edit:\n\t\treturn journal_entry\n\n\tjournal_entry.insert()\n\tjournal_entry.submit()\n\n\tif bank_transaction.deposit > 0:\n\t\tpaid_amount = bank_transaction.deposit\n\telse:\n\t\tpaid_amount = bank_transaction.withdrawal\n\n\tvouchers = json.dumps([{\n\t\t\"payment_doctype\":\"Journal Entry\",\n\t\t\"payment_name\":journal_entry.name,\n\t\t\"amount\":paid_amount}])\n\n\treturn reconcile_vouchers(bank_transaction.name, vouchers)\n\n@frappe.whitelist()\ndef create_payment_entry_bts( bank_transaction_name, reference_number=None, reference_date=None, party_type=None, party=None, posting_date=None,\n\tmode_of_payment=None, project=None, cost_center=None, allow_edit=None):\n\t# Create a new payment entry based on the bank transaction\n\tbank_transaction = frappe.db.get_values(\n\t\t\"Bank Transaction\", bank_transaction_name,\n\t\tfieldname=[\"name\", \"unallocated_amount\", \"deposit\", \"bank_account\"] ,\n\t\tas_dict=True\n\t)[0]\n\tpaid_amount = bank_transaction.unallocated_amount\n\tpayment_type = \"Receive\" if bank_transaction.deposit > 0 else \"Pay\"\n\n\tcompany_account = frappe.get_value(\"Bank Account\", bank_transaction.bank_account, \"account\")\n\tcompany = frappe.get_value(\"Account\", company_account, \"company\")\n\tpayment_entry_dict = {\n\t\t\"company\" : company,\n\t\t\"payment_type\" : payment_type,\n\t\t\"reference_no\" : reference_number,\n\t\t\"reference_date\" : reference_date,\n\t\t\"party_type\" : party_type,\n\t\t\"party\" : party,\n\t\t\"posting_date\" : posting_date,\n\t\t\"paid_amount\": paid_amount,\n\t\t\"received_amount\": paid_amount\n\t}\n\tpayment_entry = frappe.new_doc(\"Payment Entry\")\n\n\n\tpayment_entry.update(payment_entry_dict)\n\n\tif mode_of_payment:\n\t\tpayment_entry.mode_of_payment = mode_of_payment\n\tif project:\n\t\tpayment_entry.project = project\n\tif cost_center:\n\t\tpayment_entry.cost_center = cost_center\n\tif payment_type == \"Receive\":\n\t\tpayment_entry.paid_to = 
company_account\n\telse:\n\t\tpayment_entry.paid_from = company_account\n\n\tpayment_entry.validate()\n\n\tif allow_edit:\n\t\treturn payment_entry\n\n\tpayment_entry.insert()\n\n\tpayment_entry.submit()\n\tvouchers = json.dumps([{\n\t\t"payment_doctype":"Payment Entry",\n\t\t"payment_name":payment_entry.name,\n\t\t"amount":paid_amount}])\n\treturn reconcile_vouchers(bank_transaction.name, vouchers)\n\n@frappe.whitelist()\ndef reconcile_vouchers(bank_transaction_name, vouchers):\n\t# updates the clearance date of all the vouchers based on the bank transaction\n\tvouchers = json.loads(vouchers)\n\ttransaction = frappe.get_doc(\"Bank Transaction\", bank_transaction_name)\n\tif transaction.unallocated_amount == 0:\n\t\tfrappe.throw(_(\"This bank transaction is already fully reconciled\"))\n\ttotal_amount = 0\n\tfor voucher in vouchers:\n\t\tvoucher['payment_entry'] = frappe.get_doc(voucher['payment_doctype'], voucher['payment_name'])\n\t\ttotal_amount += get_paid_amount(frappe._dict({\n\t\t\t'payment_document': voucher['payment_doctype'],\n\t\t\t'payment_entry': voucher['payment_name'],\n\t\t}), transaction.currency)\n\n\tif total_amount > transaction.unallocated_amount:\n\t\tfrappe.throw(_(\"The Sum Total of Amounts of All Selected Vouchers Should be Less than the Unallocated Amount of the Bank Transaction\"))\n\taccount = frappe.db.get_value(\"Bank Account\", transaction.bank_account, \"account\")\n\n\tfor voucher in vouchers:\n\t\tgl_entry = frappe.db.get_value(\"GL Entry\", dict(account=account, voucher_type=voucher['payment_doctype'], voucher_no=voucher['payment_name']), ['credit', 'debit'], as_dict=1)\n\t\tgl_amount, transaction_amount = (gl_entry.credit, transaction.deposit) if gl_entry.credit > 0 else (gl_entry.debit, transaction.withdrawal)\n\t\tallocated_amount = gl_amount if gl_amount >= transaction_amount else transaction_amount\n\n\t\ttransaction.append(\"payment_entries\", {\n\t\t\t\"payment_document\": voucher['payment_entry'].doctype,\n\t\t\t\"payment_entry\": voucher['payment_entry'].name,\n\t\t\t\"allocated_amount\": allocated_amount\n\t\t})\n\n\ttransaction.save()\n\ttransaction.update_allocations()\n\treturn frappe.get_doc(\"Bank Transaction\", bank_transaction_name)\n\n@frappe.whitelist()\ndef get_linked_payments(bank_transaction_name, document_types = None):\n\t# get all matching payments for a bank transaction\n\ttransaction = frappe.get_doc(\"Bank Transaction\", bank_transaction_name)\n\tbank_account = frappe.db.get_values(\n\t\t\"Bank Account\",\n\t\ttransaction.bank_account,\n\t\t[\"account\", \"company\"],\n\t\tas_dict=True)[0]\n\t(account, company) = (bank_account.account, bank_account.company)\n\tmatching = check_matching(account, company, transaction, document_types)\n\treturn matching\n\ndef check_matching(bank_account, company, transaction, document_types):\n\t# combine all types of vouchers\n\tsubquery = get_queries(bank_account, company, transaction, document_types)\n\tfilters = {\n\t\t\t"amount": transaction.unallocated_amount,\n\t\t\t"payment_type" : "Receive" if transaction.deposit > 0 else "Pay",\n\t\t\t"reference_no": transaction.reference_number,\n\t\t\t"party_type": transaction.party_type,\n\t\t\t"party": transaction.party,\n\t\t\t"bank_account": bank_account\n\t\t}\n\n\tmatching_vouchers = []\n\tfor query in subquery:\n\t\tmatching_vouchers.extend(\n\t\t\tfrappe.db.sql(query, filters,)\n\t\t)\n\n\treturn sorted(matching_vouchers, key = lambda x: x[0], reverse=True) if matching_vouchers else []\n\ndef get_queries(bank_account, company, 
transaction, document_types):\n\t# get queries to get matching vouchers\n\tamount_condition = \"=\" if \"exact_match\" in document_types else \"<=\"\n\taccount_from_to = \"paid_to\" if transaction.deposit > 0 else \"paid_from\"\n\tqueries = []\n\n\tif \"payment_entry\" in document_types:\n\t\tpe_amount_matching = get_pe_matching_query(amount_condition, account_from_to, transaction)\n\t\tqueries.extend([pe_amount_matching])\n\n\tif \"journal_entry\" in document_types:\n\t\tje_amount_matching = get_je_matching_query(amount_condition, transaction)\n\t\tqueries.extend([je_amount_matching])\n\n\tif transaction.deposit > 0 and \"sales_invoice\" in document_types:\n\t\tsi_amount_matching = get_si_matching_query(amount_condition)\n\t\tqueries.extend([si_amount_matching])\n\n\tif transaction.withdrawal > 0:\n\t\tif \"purchase_invoice\" in document_types:\n\t\t\tpi_amount_matching = get_pi_matching_query(amount_condition)\n\t\t\tqueries.extend([pi_amount_matching])\n\n\t\tif \"expense_claim\" in document_types:\n\t\t\tec_amount_matching = get_ec_matching_query(bank_account, company, amount_condition)\n\t\t\tqueries.extend([ec_amount_matching])\n\n\treturn queries\n\ndef get_pe_matching_query(amount_condition, account_from_to, transaction):\n\t# get matching payment entries query\n\tif transaction.deposit > 0:\n\t\tcurrency_field = \"paid_to_account_currency as currency\"\n\telse:\n\t\tcurrency_field = \"paid_from_account_currency as currency\"\n\treturn f\"\"\"\n\tSELECT\n\t\t(CASE WHEN reference_no=%(reference_no)s THEN 1 ELSE 0 END\n\t\t+ CASE WHEN (party_type = %(party_type)s AND party = %(party)s ) THEN 1 ELSE 0 END\n\t\t+ 1 ) AS rank,\n\t\t'Payment Entry' as doctype,\n\t\tname,\n\t\tpaid_amount,\n\t\treference_no,\n\t\treference_date,\n\t\tparty,\n\t\tparty_type,\n\t\tposting_date,\n\t\t{currency_field}\n\tFROM\n\t\t`tabPayment Entry`\n\tWHERE\n\t\tpaid_amount {amount_condition} %(amount)s\n\t\tAND docstatus = 1\n\t\tAND payment_type IN (%(payment_type)s, 'Internal Transfer')\n\t\tAND ifnull(clearance_date, '') = \"\"\n\t\tAND {account_from_to} = %(bank_account)s\n\t\"\"\"\n\n\ndef get_je_matching_query(amount_condition, transaction):\n\t# get matching journal entry query\n\tcr_or_dr = \"credit\" if transaction.withdrawal > 0 else \"debit\"\n\treturn f\"\"\"\n\n\t\tSELECT\n\t\t\t(CASE WHEN je.cheque_no=%(reference_no)s THEN 1 ELSE 0 END\n\t\t\t+ 1) AS rank ,\n\t\t\t'Journal Entry' as doctype,\n\t\t\tje.name,\n\t\t\tjea.{cr_or_dr}_in_account_currency as paid_amount,\n\t\t\tje.cheque_no as reference_no,\n\t\t\tje.cheque_date as reference_date,\n\t\t\tje.pay_to_recd_from as party,\n\t\t\tjea.party_type,\n\t\t\tje.posting_date,\n\t\t\tjea.account_currency as currency\n\t\tFROM\n\t\t\t`tabJournal Entry Account` as jea\n\t\tJOIN\n\t\t\t`tabJournal Entry` as je\n\t\tON\n\t\t\tjea.parent = je.name\n\t\tWHERE\n\t\t\t(je.clearance_date is null or je.clearance_date='0000-00-00')\n\t\t\tAND jea.account = %(bank_account)s\n\t\t\tAND jea.{cr_or_dr}_in_account_currency {amount_condition} %(amount)s\n\t\t\tAND je.docstatus = 1\n\t\"\"\"\n\n\ndef get_si_matching_query(amount_condition):\n\t# get matching sales invoice query\n\treturn f\"\"\"\n\t\tSELECT\n\t\t\t( CASE WHEN si.customer = %(party)s THEN 1 ELSE 0 END\n\t\t\t+ 1 ) AS rank,\n\t\t\t'Sales Invoice' as doctype,\n\t\t\tsi.name,\n\t\t\tsip.amount as paid_amount,\n\t\t\t'' as reference_no,\n\t\t\t'' as reference_date,\n\t\t\tsi.customer as party,\n\t\t\t'Customer' as party_type,\n\t\t\tsi.posting_date,\n\t\t\tsi.currency\n\n\t\tFROM\n\t\t\t`tabSales Invoice 
Payment` as sip\n\t\tJOIN\n\t\t\t`tabSales Invoice` as si\n\t\tON\n\t\t\tsip.parent = si.name\n\t\tWHERE (sip.clearance_date is null or sip.clearance_date='0000-00-00')\n\t\t\tAND sip.account = %(bank_account)s\n\t\t\tAND sip.amount {amount_condition} %(amount)s\n\t\t\tAND si.docstatus = 1\n\t\"\"\"\n\ndef get_pi_matching_query(amount_condition):\n\t# get matching purchase invoice query\n\treturn f\"\"\"\n\t\tSELECT\n\t\t\t( CASE WHEN supplier = %(party)s THEN 1 ELSE 0 END\n\t\t\t+ 1 ) AS rank,\n\t\t\t'Purchase Invoice' as doctype,\n\t\t\tname,\n\t\t\tpaid_amount,\n\t\t\t'' as reference_no,\n\t\t\t'' as reference_date,\n\t\t\tsupplier as party,\n\t\t\t'Supplier' as party_type,\n\t\t\tposting_date,\n\t\t\tcurrency\n\t\tFROM\n\t\t\t`tabPurchase Invoice`\n\t\tWHERE\n\t\t\tpaid_amount {amount_condition} %(amount)s\n\t\t\tAND docstatus = 1\n\t\t\tAND is_paid = 1\n\t\t\tAND ifnull(clearance_date, '') = \"\"\n\t\t\tAND cash_bank_account = %(bank_account)s\n\t\"\"\"\n\ndef get_ec_matching_query(bank_account, company, amount_condition):\n\t# get matching Expense Claim query\n\tmode_of_payments = [x[\"parent\"] for x in frappe.db.get_list(\"Mode of Payment Account\",\n\t\t\tfilters={\"default_account\": bank_account}, fields=[\"parent\"])]\n\tmode_of_payments = '(\\'' + '\\', \\''.join(mode_of_payments) + '\\' )'\n\tcompany_currency = get_company_currency(company)\n\treturn f\"\"\"\n\t\tSELECT\n\t\t\t( CASE WHEN employee = %(party)s THEN 1 ELSE 0 END\n\t\t\t+ 1 ) AS rank,\n\t\t\t'Expense Claim' as doctype,\n\t\t\tname,\n\t\t\ttotal_sanctioned_amount as paid_amount,\n\t\t\t'' as reference_no,\n\t\t\t'' as reference_date,\n\t\t\temployee as party,\n\t\t\t'Employee' as party_type,\n\t\t\tposting_date,\n\t\t\t'{company_currency}' as currency\n\t\tFROM\n\t\t\t`tabExpense Claim`\n\t\tWHERE\n\t\t\ttotal_sanctioned_amount {amount_condition} %(amount)s\n\t\t\tAND docstatus = 1\n\t\t\tAND is_paid = 1\n\t\t\tAND ifnull(clearance_date, '') = \"\"\n\t\t\tAND mode_of_payment in {mode_of_payments}\n\t\"\"\"\n","sub_path":"verp/accounts/doctype/bank_reconciliation_tool/bank_reconciliation_tool.py","file_name":"bank_reconciliation_tool.py","file_ext":"py","file_size_in_byte":14866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"523535299","text":"from session_object.service import SessionObjectService\n\n\ndef favorites_items_number(request):\n service = SessionObjectService('favorites')\n favorites = service.get_or_create(request)\n return {\n 'favorites_items_number': favorites.get_items_number(),\n 'favorites': [item.product_id for item in favorites.items]\n }\n","sub_path":"favorites/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"158432377","text":"WHITE = (255, 255, 255) # Color Codes\nGREY = (128, 128, 128)\nYELLOW = (204, 204, 0)\nBLUE = (50, 255, 255)\nBLACK = (0, 0, 0)\nDARK_SQUARE = (126, 217, 87)\nLIGHT_SQUARE = (255, 255, 255)\nHIGHLIGHTCOLOR = (186, 202, 68)\nimport game\nimport docs\nimport sys\nimport pygame\nimport Save_Load\nimport Sound\npygame.init()\nclass menu():\n def __init__(self):\n self.default = 0.3\n self.screen = pygame.display.set_mode((800, 700))\n self.prev_call = False\n self.SOUND_MENU_PNG = pygame.transform.scale(pygame.image.load(\"ChessImage/sound.png\"), (40, 40))\n self.DOC_MENU_PNG = pygame.transform.scale(pygame.image.load(\"ChessImage/doc.png\"), (40, 40))\n 
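# Starting position: each cell is colour+piece (\"bR\" = black rook, \"wp\" = white pawn); \"--\" marks an empty square.\n        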
self.preset = [\n [\"bR\", \"bN\", \"bB\", \"bQ\", \"bK\", \"bB\", \"bN\", \"bR\"],\n [\"bp\", \"bp\", \"bp\", \"bp\", \"bp\", \"bp\", \"bp\", \"bp\"],\n [\"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\"],\n [\"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\"],\n [\"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\"],\n [\"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\"],\n [\"wp\", \"wp\", \"wp\", \"wp\", \"wp\", \"wp\", \"wp\", \"wp\"],\n [\"wR\", \"wN\", \"wB\", \"wQ\", \"wK\", \"wB\", \"wN\", \"wR\"]\n ]\n\n\n def display(self):\n MAIN_MENU_PNG = pygame.transform.scale(pygame.image.load(\"ChessImage/menu_2.png\"), (800, 800))\n LOGO_MENU_PNG = pygame.transform.scale(pygame.image.load(\"ChessImage/logo.png\"), (300, 300))\n PLAYBUTTON_MENU_PNG = pygame.transform.scale(pygame.image.load(\"ChessImage/playbutton.png\"), (200, 100))\n LOADBUTTON_MENU_PNG = pygame.transform.scale(pygame.image.load(\"ChessImage/loadbutton.png\"), (200, 100))\n QUITBUTTON_MENU_PNG = pygame.transform.scale(pygame.image.load(\"ChessImage/quitbutton.png\"), (200, 100))\n RATE_MENU_PNG = pygame.transform.scale(pygame.image.load(\"ChessImage/rate.png\"), (100, 40))\n SOUND_PLUS_MENU_PNG = pygame.transform.scale(pygame.image.load(\"ChessImage/plus.png\"), (40, 40))\n SOUND_MINUS_MENU_PNG = pygame.transform.scale(pygame.image.load(\"ChessImage/minus.png\"), (40, 40))\n pygame.display.set_caption(\"Desi Chess (Don't know how we managed to do this)\")\n self.button(MAIN_MENU_PNG,400,400)\n self.button(LOGO_MENU_PNG,400,100)\n self.button(PLAYBUTTON_MENU_PNG,400,300)\n self.button(LOADBUTTON_MENU_PNG,400,420)\n self.button(QUITBUTTON_MENU_PNG,400,540)\n self.button(self.SOUND_MENU_PNG,320,620)\n self.button(self.DOC_MENU_PNG,370,620)\n self.button(RATE_MENU_PNG,450,620)\n self.button(SOUND_PLUS_MENU_PNG,320,660)\n self.button(SOUND_MINUS_MENU_PNG,370,660)\n\n pygame.display.update()\n def button(self,img,row,col):\n button = img.get_rect()\n button.center = (row,col)\n self.screen.blit(img,button)\n def run(self):\n runs = True\n obj1 = volumeslider()\n obj1.playBGAudio()\n while runs:\n self.display()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n runs = False\n sys.exit(0)\n if event.type == pygame.MOUSEBUTTONDOWN:\n current_position = pygame.mouse.get_pos()\n col_1 = current_position[0]\n row_1 = current_position[1]\n print(col_1,row_1)\n if col_1 >= 300 and col_1 <= 500 and row_1 >= 250 and row_1 <= 350: #play\n Chess = game.GameEngine(\"w\",self.preset)\n obj = Sound.Audio()\n obj.playBGAudio()\n if col_1 >= 300 and col_1 <= 500 and row_1 >= 370 and row_1 <= 470: #load\n loading = Save_Load.Load()\n dummy = loading.run()\n file1 = open(\"Save_turn.txt\",\"r\")\n turn = file1.read()\n Chess = game.GameEngine(turn,dummy)\n obj = Sound.Audio()\n obj.playBGAudio()\n if col_1 >= 300 and col_1 <= 500 and row_1 >= 490 and row_1 <= 590: #quit\n runs = False\n sys.exit(0)\n if col_1 >= 300 and col_1 <= 340 and row_1 >= 600 and row_1 <= 640: #audio off/on\n if self.prev_call == False:\n self.prev_call = True\n self.SOUND_MENU_PNG = pygame.transform.scale(pygame.image.load(\"ChessImage/soundoff.png\"), (40, 40))\n obj1.Mute()\n elif self.prev_call == True:\n self.prev_call = False\n self.SOUND_MENU_PNG = pygame.transform.scale(pygame.image.load(\"ChessImage/sound.png\"), (40, 40))\n obj1.unMute()\n if col_1 >= 350 and col_1 <= 390 and row_1 >= 600 and row_1 <= 640:\n doc = docs.documentation()\n doc.run()\n if col_1 >= 400 and col_1 <= 500 and row_1 >= 600 
and row_1 <= 640:\n plis = docs.rate_us()\n plis.run()\n if col_1 >= 300 and col_1 <= 340 and row_1 >= 640 and row_1 <= 680:\n self.default +=0.2\n obj1.volume_change(self.default)\n if col_1 >= 350 and col_1 <= 390 and row_1 >= 640 and row_1 <= 680:\n self.default -=0.2\n obj1.volume_change(self.default)\n\n\nclass volumeslider(Sound.Audio,menu):\n def __init__(self):\n super().__init__()\n def volume_change(self,defa):\n pygame.mixer.Channel(0).set_volume(defa)\n pygame.mixer.Channel(1).set_volume(defa)\n\n\n\nmenu_test = menu()\nmenu_test.run()\n\n\n\n","sub_path":"Chess/Final_Chess/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"369857201","text":"# Import dependencies\nfrom copy import deepcopy\nfrom flask import render_template, request, send_file, url_for, jsonify\nimport io\nimport json\nimport os\nfrom Songs2Slides import app, core\nfrom Songs2Slides.config import defaultSettings\nimport tempfile\n\n\n\n# Home page\n@app.route(\"/\", methods=[\"GET\"])\ndef home():\n return render_template(\"home.html\")\n\n\n\n# Settings page\n@app.route(\"/settings/\", methods=[\"GET\"])\ndef settings():\n return render_template(\"settings.html\", title = \"Settings\")\n\n\n\n# Settings JSON file\n@app.route(\"/settings.json\", methods=[\"GET\"])\ndef settingsJSON():\n return jsonify(defaultSettings)\n\n\n\n# Get Powerpoint\n@app.route(\"/pptx\", methods=[\"POST\"])\ndef pptx():\n # Get settings\n settings = deepcopy(defaultSettings)\n try:\n for setting in request.json[\"settings\"]:\n settings[setting] = request.json[\"settings\"][setting]\n except:\n pass\n\n try:\n # Get temp\n temp = tempfile.NamedTemporaryFile(mode=\"w+t\", suffix=\".pptx\", delete=False)\n temp.close()\n\n # Get lyrics\n lyrics = json.loads(request.form[\"lyrics\"])\n\n # Save uploaded powerpoint\n if (request.files[\"pptxFile\"].filename != \"\"):\n request.files[\"pptxFile\"].save(temp.name)\n\n # Create powerpoint\n core.CreatePptx(lyrics, temp.name, settings, True)\n\n # Read file into stream\n with open(temp.name, 'rb') as f:\n pptx = io.BytesIO(f.read())\n finally:\n # Delete temp file\n os.remove(temp.name)\n \n # Return powerpoint\n return send_file(pptx, as_attachment=True, attachment_filename='download.pptx')\n\n\n\n# Get lyrics\n@app.route(\"/lyrics\", methods=[\"POST\"])\ndef lyrics():\n # Get settings\n settings = deepcopy(defaultSettings)\n try:\n for setting in request.json[\"settings\"]:\n settings[setting] = request.json[\"settings\"][setting]\n except:\n pass\n\n # Get lyrics\n lyrics = []\n failed = []\n for song in request.json[\"songs\"]:\n try:\n lyrics += core.ParseLyrics(song[0], song[1], settings)\n except:\n failed += [song]\n \n # Return lyrics\n return jsonify({\"lyrics\": lyrics, \"errors\": failed})\n\n\n\n# 404 page\n@app.errorhandler(404)\ndef error404(e):\n message = \"The requested URL was not found on the server.\"\n return render_template(\"error.html\", title=\"404 Not Found\", code=\"404\", message=message), 404\n","sub_path":"Songs2Slides/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"429067056","text":"\n\n\nimport numpy as np\nimport gzip\nimport time\nimport pickle\n\nfrom os.path import expanduser\nhome = expanduser(\"~\")\n\nimport sys, os\nsys.path.insert(0, '../models')\nsys.path.insert(0, 
'../models/utils')\n\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\n\nimport torch\nfrom torch.autograd import Variable\nimport torch.utils.data\nimport torch.optim as optim\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom vae_2 import VAE\n\nfrom inference_net import standard\n\n\n\n\n#Load data\nprint ('Loading data' )\ndata_location = home + '/Documents/MNIST_data/'\n# with open(data_location + 'binarized_mnist.pkl', 'rb') as f:\n# train_x, valid_x, test_x = pickle.load(f)\nwith open(data_location + 'binarized_mnist.pkl', 'rb') as f:\n train_x, valid_x, test_x = pickle.load(f, encoding='latin1')\nprint ('Train', train_x.shape)\nprint ('Valid', valid_x.shape)\nprint ('Test', test_x.shape)\n\n\n\n\n\n\n\n\n\n\ndef train_encdoer_and_decoder(model, train_x, test_x, k, batch_size,\n start_at, save_freq, display_epoch, \n path_to_save_variables):\n\n train_y = torch.from_numpy(np.zeros(len(train_x)))\n train_x = torch.from_numpy(train_x).float().type(model.dtype)\n\n train_ = torch.utils.data.TensorDataset(train_x, train_y)\n train_loader = torch.utils.data.DataLoader(train_, batch_size=batch_size, shuffle=True)\n\n #IWAE paper training strategy\n time_ = time.time()\n total_epochs = 0\n\n i_max = 7\n\n warmup_over_epochs = 100.\n\n\n all_params = []\n for aaa in model.q_dist.parameters():\n all_params.append(aaa)\n # for aaa in model.generator.parameters():\n # all_params.append(aaa)\n # print (len(all_params), 'number of params')\n\n print (model.q_dist)\n # print (model.q_dist.q)\n print (model.generator)\n\n for i in range(0,i_max+1):\n\n lr = .001 * 10**(-i/float(i_max))\n print (i, 'LR:', lr)\n\n optimizer = optim.Adam(all_params, lr=lr)\n\n epochs = 3**(i)\n\n for epoch in range(1, epochs + 1):\n\n for batch_idx, (data, target) in enumerate(train_loader):\n\n batch = Variable(data)#.type(model.dtype)\n\n optimizer.zero_grad()\n\n warmup = total_epochs/warmup_over_epochs\n if warmup > 1.:\n warmup = 1.\n\n elbo, logpxz, logqz = model.forward(batch, k=k, warmup=warmup)\n\n loss = -(elbo)\n loss.backward()\n optimizer.step()\n\n total_epochs += 1\n\n\n if total_epochs%display_epoch==0:\n print ('Train Epoch: {}/{}'.format(epoch, epochs),\n 'total_epochs {}'.format(total_epochs),\n 'LL:{:.3f}'.format(-loss.data[0]),\n 'logpxz:{:.3f}'.format(logpxz.data[0]),\n 'logqz:{:.3f}'.format(logqz.data[0]),\n 'warmup:{:.3f}'.format(warmup),\n 'T:{:.2f}'.format(time.time()-time_),\n )\n time_ = time.time()\n\n\n if total_epochs >= start_at and (total_epochs-start_at)%save_freq==0:\n\n # save params\n save_file = path_to_save_variables+'_encoder_'+str(total_epochs)+'.pt'\n torch.save(model.q_dist.state_dict(), save_file)\n print ('saved variables ' + save_file)\n # save_file = path_to_save_variables+'_generator_'+str(total_epochs)+'.pt'\n # torch.save(model.generator.state_dict(), save_file)\n # print ('saved variables ' + save_file)\n\n\n\n # save params\n save_file = path_to_save_variables+'_encoder_'+str(total_epochs)+'.pt'\n torch.save(model.q_dist.state_dict(), save_file)\n print ('saved variables ' + save_file)\n # save_file = path_to_save_variables+'_generator_'+str(total_epochs)+'.pt'\n # torch.save(model.generator.state_dict(), save_file)\n # print ('saved variables ' + save_file)\n\n\n print ('done training')\n\n\n\n\n\n\n\n\n\n\n\nx_size = 784\nz_size = 50\nbatch_size = 20\nk = 1\n#save params \nstart_at = 100\nsave_freq = 300\ndisplay_epoch = 3\n\n# hyper_config = { \n# 'x_size': x_size,\n# 'z_size': z_size,\n# 'act_func': F.tanh,# F.relu,\n# 
'encoder_arch': [[x_size,200],[200,200],[200,z_size*2]],\n# 'decoder_arch': [[z_size,200],[200,200],[200,x_size]],\n# 'q_dist': standard, #FFG_LN#,#hnf,#aux_nf,#flow1,#,\n# 'cuda': 1\n# }\n\n\n\n\n# Which gpu\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\n\n\n\n\n\nhyper_config = { \n 'x_size': x_size,\n 'z_size': z_size,\n 'act_func': F.tanh,# F.relu,\n 'encoder_arch': [[x_size,z_size*2]],\n 'decoder_arch': [[z_size,200],[200,200],[200,x_size]],\n 'q_dist': standard, #FFG_LN#,#hnf,#aux_nf,#flow1,#,\n 'cuda': 1\n }\n\nprint ('Init model')\nmodel = VAE(hyper_config)\nif torch.cuda.is_available():\n model.cuda()\n\nprint('\\nModel:', hyper_config,'\\n')\n\n\n\n\n\n# path_to_load_variables=''\npath_to_save_variables=home+'/Documents/tmp/inference_suboptimality/vae_smallencoder_withflow' #.pt'\n# path_to_save_variables=home+'/Documents/tmp/inference_suboptimality/vae_regencoder' #.pt'\n# path_to_save_variables=home+'/Documents/tmp/pytorch_vae'+str(epochs)+'.pt'\n# path_to_save_variables=this_dir+'/params_'+model_name+'_'\n# path_to_save_variables=''\n\n\n# load generator\nprint ('Load params for decoder')\npath_to_load_variables=home+'/Documents/tmp/inference_suboptimality/vae_generator_3280.pt'\nmodel.generator.load_state_dict(torch.load(path_to_load_variables, map_location=lambda storage, loc: storage))\nprint ('loaded variables ' + path_to_load_variables)\nprint ()\n\n\n\n\n\n\n\n\nprint('\\nTraining')\n# train_lr_schedule(model=model, train_x=train_x, test_x=test_x, k=k, batch_size=batch_size,\n# start_at=start_at, save_freq=save_freq, display_epoch=display_epoch, \n# path_to_save_variables=path_to_save_variables)\n\n\ntrain_encdoer_and_decoder(model=model, train_x=train_x, test_x=test_x, k=k, batch_size=batch_size,\n start_at=start_at, save_freq=save_freq, display_epoch=display_epoch, \n path_to_save_variables=path_to_save_variables)\n\nprint ('Done.')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Inference_Suboptimality/flow_effect_on_amort_exp/train_encoder_only.py","file_name":"train_encoder_only.py","file_ext":"py","file_size_in_byte":6364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"444174784","text":"import sys\nimport csv\nsys.path.append(\"C:/Users/User/Desktop/Introduction to databases/HW1/src\")\nfrom RDBDataTable import RDBDataTable\nimport json\n\nfile = open(\"rdb_table_test.txt\",\"w\")\n\n# find by primary key\nprint(\"find by primary key\")\ns = RDBDataTable('People', [\"playerID\"], None, False)\nresult = s.find_by_primary_key( [\"'willite01'\"],field_list=['playerID', 'nameLast'])\nprint(result)\n\nfile.write(\"\\n\\nTest find by primary key\")\nfile.write('\\nTable:People.csv, key = [\"willite01\"], field_list=[\"playerID\", \"nameLast\"]\\n')\nfile.write(\"Result:\"+str(result) + '\\n')\n\n# # find by template\nprint(\"find by template\")\ntmp = {\"nameLast\": \"Williams\", \"throws\": \"R\"}\nresult = s.find_by_template(tmp, field_list=['playerID', 'nameLast', 'birthCity', 'throws'])\nprint(result)\n\nfile.write(\"\\n\\nTest find by template\")\nfile.write('\\nTable:People.csv, tmp = {\"nameLast\": \"Williams\", \"throws\": \"R\"}\\n')\nfile.write(\"Result:\"+ str(result) + '\\n')\n\n\ns2 = RDBDataTable(\"offices\", [\"officeCode\"], None, False)\n# insert\ntmp = {\"city\": \"Tokyo\"}\nresult1 = s2.find_by_template(tmp, field_list = ['officeCode','city','phone','addressLine1','addressLine2','state'])\n\nnew_r = {'officeCode': '201', 'city': 
'Tokyo'}\nfile.write(\"\\n\\nTest insert\")\nfile.write('\\nTable:offices.csv, key = \"officeCode\": \"1\", new_r = {\"officeCode\": \"201\", \"city\": \"Tokyo\"}\\n')\nfile.write(\"\\nBefore insert:\" + str(result1) + '\\n')\ns2.insert(new_r)\nresult2 = s2.find_by_template(tmp,field_list = ['officeCode','city','phone','addressLine1','addressLine2','state'])\nprint(\"\\n\\nAfter insert, \", result2)\nfile.write(\"After insert:\" + str(result2) + '\\n')\n\n# update by key\nkey = ['1']\nnew_v = {'state':'\"Mars\"','country':'\"Jupiter\"'}\nresult = s2.find_by_primary_key(key, field_list=['officeCode','city','phone','addressLine1','addressLine2','state','country'])\nfile.write(\"\\n\\nUpdate by key\")\nfile.write('\\nTable:Offices.csv, key: OfficeCode = 1 , new_v = {\"state\": \"Mars\", \"country\": \"Jupiter\"}\\n')\nfile.write(\"\\nBefore update,\" + str(result) + \"\\n\")\nprint(\"\\n\\nBefore update, \", str(result))\nresult = s2.update_by_key(key, new_v)\nresult2 = s2.find_by_primary_key(key,field_list=['officeCode','city','phone','addressLine1','addressLine2','state','country'])\nprint(\"\\n\\nAfter update, \", str(result2))\nfile.write(\"\\nAfter update,\" + str(result2) + \"\\n\")\n\n# update by template\ntmp = {'city': 'Boston'}\nnew_v = {'state': '\"Mars\"', 'country': '\"Jupiter\"'}\npre_result = s2.find_by_template(tmp,field_list=['officeCode','city','phone','addressLine1','addressLine2','state','country'])\nprint('Before update... ', pre_result)\nfile.write(\"\\n\\nUpdate by template\")\nfile.write('\\nTable:Offices.csv, tmp = {\"city\":\"Boston\"} , new_v = {\"state\":\"Mars\",\"country\":\"Jupiter\"}\\n')\nfile.write(\"\\nBefore update,\" + str(pre_result) + \"\\n\")\ns2.update_by_template(tmp, new_v)\nresult = s2.find_by_template(tmp,field_list=['officeCode','city','phone','addressLine1','addressLine2','state','country'])\nprint('\\n\\nAfter update ...', result)\nfile.write(\"\\nAfter update,\" + str(result) + '\\n')\n\n# delete by template\ntmp = {'city': 'Paris'}\nresult1 = s2.find_by_template(tmp,field_list = ['officeCode','city','phone','addressLine1','addressLine2','state'])\nfile.write(\"\\n\\nTest delete by template\")\nfile.write('\\nTable:Offices.csv, tmp = {\"city\":\"Paris\"}\\n')\nprint(\"\\n\\nBefore delete, \", result1)\nfile.write(\"\\nBefore delete,\" + str(result1) + '\\n')\ns2.delete_by_template(tmp)\n\nresult2 = s2.find_by_template(tmp,field_list = ['officeCode','city','phone','addressLine1','addressLine2','state'])\nprint(\"\\n\\nAfter delete, \", result2)\nfile.write(\"\\nAfter delete,\" + str(result2) + '\\n')\n\n# delete by key\n\nfile.write(\"\\n\\nTest delete by key\")\nfile.write('\\nTable:Offices.csv, key: \"officeCode\" = \"1\"\\n')\npre_result = s2.find_by_primary_key(['1'],field_list = ['officeCode','city','phone','addressLine1','addressLine2','state'])\nfile.write(\"\\nBefore delete,\" + str(pre_result) + \"\\n\")\nresult = s2.delete_by_key(['\"1\"'])\nresult2 = s2.find_by_primary_key(['1'],field_list = ['officeCode','city','phone','addressLine1','addressLine2','state'])\nprint(\"\\nAfter delete,\" + str(result2) +\"\\n\")\nfile.write(\"\\nAfter delete,\" + str(result2) + '\\n')\n","sub_path":"homwork1/test/rdb_table_tests.py","file_name":"rdb_table_tests.py","file_ext":"py","file_size_in_byte":4165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"246689981","text":"import numpy as np\r\nx,y,c=map(int,input().split())\r\nl2=[]\r\nl3=[]\r\nfor i in range(x):\r\n    s=input()\r\n    l=[int(i) 
for i in s if i!=\" \"]\r\n l2.append(l)\r\nfor i in range(y):\r\n str=input()\r\n m=[int(i) for i in str if i!=\" \"]\r\n l3.append(m)\r\na=np.array(l2)\r\nb=np.array(l3)\r\nprint(np.concatenate((a,b),axis=0))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Python(Hackerrank)/Concatenate.py","file_name":"Concatenate.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"604125424","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Import dataset\ndataset = pd.read_csv(\n '/Users/MarcPlunkett/Desktop/datascience/ann/Churn_Modelling.csv')\n\nX = dataset.iloc[:, 3:13].values\ny = dataset.iloc[:, 13].values\n\nX\n\n# Encoding categorical data\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nlabelencoder_X_1 = LabelEncoder()\nX[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])\nlabelencoder_X_2 = LabelEncoder()\nX[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])\n\nonehotencoder = OneHotEncoder(categorical_features=[1])\nX = onehotencoder.fit_transform(X).toarray()\n\nX = X[:, 1:]\n\nX\n\n# Splitting into training and test set\n\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.2, random_state=0)\n\n# Feature scaling\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\n\n# Importing ANN libraries\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n\n# Initialise ANN\nclassifier = Sequential()\n\n# Adding the input layer and first hidden layer\nclassifier.add(Dense(output_dim=6, init='uniform',\n activation='relu', input_dim=11))\n\n# Adding the second hidden layer\nclassifier.add(Dense(output_dim=6, init='uniform', activation='relu'))\n\n# Adding the output layer\nclassifier.add(Dense(output_dim=1, init='uniform', activation='sigmoid'))\n\n# Compiling the ANN\nclassifier.compile(\n optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n\n# Fitting the ANN\nclassifier.fit(X_train, y_train, batch_size=10, nb_epoch=10)\n\n# Preidcting the test set results\ny_pred = classifier.predict(X_test)\ny_pred = (y_pred > 0.5)\n\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_pred)\nimport numpy\nnumpy.set_printoptions(threshold=numpy.nan)\nprint(y_pred)\nprint(cm)\n","sub_path":"ann/ann.py","file_name":"ann.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"167156105","text":"import numpy as np\nimport math\nimport random\n\ndef Heuristic(env,observation,vision):\n grid, pos, remain_steps= observation\n \n #Add borders in Grid perimeter according to vision range of helicopter, in order\n #to explore without boundary limits (Example: 2 vision add 2 cells at each side of Grid)\n Pad_grid=ExpandGrid(grid,vision) \n \n #Get neighborhood in agent current position and vision range\n neighborhood= get_neighborhood(Pad_grid,pos,vision) \n \n #Count fire cells by zone(8 zones) \n burned_densities={}\n \n #Up Zone\n up_zone=neighborhood[ 0:neighborhood.shape[0]-(vision+1),0:neighborhood.shape[1]] #Get Up Zone\n up_burned=Count_Burned_Trees(env,up_zone) #Get fire cells in up zone \n burned_densities[\"up\"]=up_burned #Add zone and fire density to dictionary\n \n #Up Left Zone\n up_left_zone=neighborhood[ 
0:neighborhood.shape[0]-(vision+1),0:neighborhood.shape[0]-(vision+1) ]\n    up_left_burned=Count_Burned_Trees(env,up_left_zone) \n    burned_densities[\"up_left\"]=up_left_burned \n    \n    #Up Right Zone\n    up_right_zone=neighborhood[ 0:neighborhood.shape[0]-(vision+1),neighborhood.shape[0]-vision:neighborhood.shape[0] ]\n    up_right_burned=Count_Burned_Trees(env,up_right_zone) \n    burned_densities[\"up_right\"]=up_right_burned \n    \n    #Down Zone\n    down_zone=neighborhood[ neighborhood.shape[0]-vision:neighborhood.shape[0],0:neighborhood.shape[1]]\n    down_burned=Count_Burned_Trees(env,down_zone) \n    burned_densities[\"down\"]=down_burned\n    \n    #Down Left\n    down_left_zone=neighborhood[ neighborhood.shape[0]-vision:neighborhood.shape[0], 0:neighborhood.shape[0]-(vision+1) ]\n    down_left_burned=Count_Burned_Trees(env,down_left_zone) \n    burned_densities[\"down_left\"]=down_left_burned \n    \n    #Down Right\n    down_right_zone=neighborhood[ neighborhood.shape[0]-vision:neighborhood.shape[0], neighborhood.shape[0]-vision:neighborhood.shape[0] ]\n    down_right_burned=Count_Burned_Trees(env,down_right_zone) \n    burned_densities[\"down_right\"]=down_right_burned \n    \n    #Left Zone\n    left_zone=neighborhood[ 0:neighborhood.shape[0],0:neighborhood.shape[0]-(vision+1)]\n    left_burned=Count_Burned_Trees(env,left_zone) \n    burned_densities[\"left\"]=left_burned\n    \n    #Right Zone\n    right_zone=neighborhood[ 0:neighborhood.shape[1],neighborhood.shape[0]-vision:neighborhood.shape[0]]\n    right_burned=Count_Burned_Trees(env,right_zone) \n    burned_densities[\"right\"]=right_burned\n    \n    #Action based on burned trees/zone\n    actions= ((1,2,3),\n              (4,5,6),\n              (7,8,9))\n    \n    #Max function will return a (key,value) tuple of the maximum value from the dictionary\n    mx_tuple = max(burned_densities.items(),key = lambda x:x[1]) \n    #Mx_tuple[1] indicates maximum dictionary items value\n    max_list =[i[0] for i in burned_densities.items() if i[1]==mx_tuple[1]] \n    \n    #Apply Heuristic Rules according to fire cells in each zone\n    #If there are more than 1 max burn zone, choose randomly\n    if len(max_list) > 1: \n        a=random.choice(max_list)\n        if a==\"up\":\n            action=actions[0][1]\n        elif a==\"down\":\n            action=actions[2][1]\n        elif a==\"left\":\n            action=actions[1][0]\n        elif a==\"right\":\n            action=actions[1][2]\n        elif a==\"up_left\":\n            action=actions[0][0]\n        elif a==\"up_right\":\n            action=actions[0][2]\n        elif a==\"down_left\":\n            action=actions[2][0]\n        elif a==\"down_right\":\n            action=actions[2][2]\n    #If there is only one zone with max fire density (move in up,down,right,left or corners only)\n    elif len(max_list)==1:\n        if max_list[0]==\"up\":\n            action=actions[0][1]\n        elif max_list[0]==\"down\":\n            action=actions[2][1]\n        elif max_list[0]==\"left\":\n            action=actions[1][0]\n        elif max_list[0]==\"right\":\n            action=actions[1][2]\n        elif max_list[0]==\"up_left\":\n            action=actions[0][0]\n        elif max_list[0]==\"up_right\":\n            action=actions[0][2]\n        elif max_list[0]==\"down_left\":\n            action=actions[2][0]\n        elif max_list[0]==\"down_right\":\n            action=actions[2][2]\n    else:\n        action=random.randint(1, 9)\n    act=action \n    return act\n\n#Receives a grid zone and counts fire cells\ndef Count_Burned_Trees(env,zone):\n    counter=0\n    for row in range(zone.shape[0]):\n        for col in range(zone.shape[1]):\n            if zone[row][col]==env.fire:\n                counter+=1\n    return counter\n\n#Get neighborhood of agent according to vision range\ndef get_neighborhood(grid,pos,vision):\n    pos_row=pos[0]\n    pos_col=pos[1] \n    neighborhood=grid[pos_row:pos_row+1+vision*2,pos_col:pos_col+1+vision*2]\n    return neighborhood\n\ndef 
ExpandGrid(grid,vision): \n size = grid.shape \n PadGrid = np.zeros((size[0],size[1]), dtype=np.int16) \n for i in range(size[0]):\n for j in range(size[1]):\n if(grid[i][j][0]==1):\n PadGrid[i][j]=0\n elif(grid[i][j][1]==1):\n PadGrid[i][j]=1\n else:\n PadGrid[i][j]=2\n size=PadGrid.shape\n PadGrid2 = np.zeros((size[0]+2*vision,size[1]+2*vision), dtype=np.int16)\n PadGrid2[vision:-vision,vision:-vision] = PadGrid\n return PadGrid2","sub_path":"Heuristic.py","file_name":"Heuristic.py","file_ext":"py","file_size_in_byte":5440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"124754601","text":"# 2D shallow water equations in a closed channel\n# ==============================================\n#\n# Solves shallow water equations in closed rectangular domain\n# with sloping bathymetry.\n#\n# Initially water elevation is set to a piecewise linear function\n# with a slope in the deeper (left) end of the domain. This results\n# in a wave that develops a shock as it reaches shallower end of the domain.\n# This example tests the integrity of the 2D mode and stability of momentum\n# advection.\n#\n# Setting\n# solver_obj.nonlin = False\n# uses linear wave equation instead, and no shock develops.\n#\n# Tuomas Karna 2015-03-03\nfrom scipy.interpolate import interp1d\nfrom thetis import *\n\noutputdir = 'outputs'\nmesh2d = Mesh('channel_mesh.msh')\nprint_output('Loaded mesh '+mesh2d.name)\nprint_output('Exporting to '+outputdir)\n# total duration in seconds\nt_end = 6 * 3600\n# estimate of max advective velocity used to estimate time step\nu_mag = Constant(6.0)\n# export interval in seconds\nt_export = 100.0\n\n# bathymetry\nP1_2d = FunctionSpace(mesh2d, 'CG', 1)\nbathymetry_2d = Function(P1_2d, name='Bathymetry')\n\ndepth_oce = 20.0\ndepth_riv = 5.0 # 5.0 closed\nbath_x = np.array([0, 100e3])\nbath_v = np.array([depth_oce, depth_riv])\n\n\ndef bath(x, y, z):\n padval = 1e20\n x0 = np.hstack(([-padval], bath_x, [padval]))\n vals0 = np.hstack(([bath_v[0]], bath_v, [bath_v[-1]]))\n return interp1d(x0, vals0)(x)\n\n\nx_func = Function(P1_2d).interpolate(Expression('x[0]'))\nbathymetry_2d.dat.data[:] = bath(x_func.dat.data, 0, 0)\n\n# --- create solver ---\nsolver_obj = solver2d.FlowSolver2d(mesh2d, bathymetry_2d)\noptions = solver_obj.options\noptions.t_export = t_export\noptions.t_end = t_end\noptions.outputdir = outputdir\noptions.u_advection = u_mag\noptions.check_vol_conservation_2d = True\noptions.fields_to_export = ['uv_2d', 'elev_2d']\noptions.timestepper_type = 'SSPRK33'\n# options.timestepper_type = 'SSPIMEX'\n# options.timestepper_type = 'CrankNicolson'\nif options.timestepper_type in ['CrankNicolson', 'SSPIMEX']:\n options.dt = 10.0 # override estimated dt\n# initial conditions, piecewise linear function\nelev_x = np.array([0, 30e3, 100e3])\nelev_v = np.array([6, 0, 0])\n\n\ndef elevation(x, y, z, x_array, val_array):\n padval = 1e20\n x0 = np.hstack(([-padval], x_array, [padval]))\n vals0 = np.hstack(([val_array[0]], val_array, [val_array[-1]]))\n return interp1d(x0, vals0)(x)\n\n\nx_func = Function(P1_2d).interpolate(Expression('x[0]'))\nelev_init = Function(P1_2d)\nelev_init.dat.data[:] = elevation(x_func.dat.data, 0, 0,\n elev_x, elev_v)\nsolver_obj.assign_initial_conditions(elev=elev_init)\n\nsolver_obj.iterate()\n","sub_path":"examples/channel2d/channel2d.py","file_name":"channel2d.py","file_ext":"py","file_size_in_byte":2652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} 
+{"seq_id":"582875328","text":"import unittest\nfrom inventoryAllocator import inventoryAllocator\n\nclass TestInventoryAllocator(unittest.TestCase):\n # Provided tests\n def test_provided_one_warehouse(self):\n order = { 'apple': 1 }\n warehouses = [{ 'name': 'owd', 'inventory': { 'apple': 1 } }]\n expected = [{ 'owd': { 'apple': 1 } }]\n self.assertEqual(expected, inventoryAllocator(order, warehouses))\n\n # We can also test to make sure the remaining inventory in\n # the warehouses is correct, but since this is not required,\n # we will skip these tests (a sample is below)\n expected_remaining = [{ 'name': 'owd', 'inventory': { 'apple': 0 } }]\n self.assertEqual(warehouses, expected_remaining)\n\n def test_provided_multiple_warehouses(self):\n order = { 'apple': 10 }\n warehouses = [{ 'name': 'owd', 'inventory': { 'apple': 5 } }, { 'name': 'dm', 'inventory': { 'apple': 5 }}]\n expected = [{ 'owd': { 'apple': 5 } }, { 'dm': { 'apple': 5 }}]\n self.assertEqual(expected, inventoryAllocator(order, warehouses))\n\n def test_provided_not_enough_inv(self):\n order = { 'apple': 1 }\n warehouses = [{ 'name': 'owd', 'inventory': { 'apple': 0 } }]\n expected = []\n self.assertEqual(expected, inventoryAllocator(order, warehouses))\n\n\n # Everything empty\n def test_everything_empty(self):\n order = {}\n warehouses = []\n expected = []\n self.assertEqual(expected, inventoryAllocator(order, warehouses))\n\n # Empty order\n def test_empty_order(self):\n order = {}\n warehouses = [{ 'name': 'owd', 'inventory': { 'apple': 1 } }]\n expected = []\n self.assertEqual(expected, inventoryAllocator(order, warehouses))\n\n # Empty warehouses\n def test_empty_warehouses(self):\n order = { 'apple': 1, 'banana': 3 }\n warehouses = []\n expected = []\n self.assertEqual(expected, inventoryAllocator(order, warehouses))\n\n # Order with 0 of everything\n def test_order_nothing(self):\n order = { 'apple': 0, 'banana': 0, 'cherries': 0 }\n warehouses = [{ 'name': 'wh1', \n 'inventory': { 'apple': 1, 'banana': 2, 'lemon': 1 }},\n { 'name': 'wh2', \n 'inventory': { 'apple': 2, 'cherries': 5, 'lemon': 1 }}]\n expected = []\n self.assertEqual(expected, inventoryAllocator(order, warehouses))\n\n # Warehouses with no inventory\n def test_no_inventory(self):\n order = { 'apple': 1, 'banana': 12, 'cherries': 42 }\n warehouses = [{ 'name': 'wh1', \n 'inventory': { 'apple': 0, 'banana': 0, 'lemon': 0 }},\n { 'name': 'wh2', \n 'inventory': { 'apple': 0, 'cherries': 0, 'lemon': 0 }}]\n expected = []\n self.assertEqual(expected, inventoryAllocator(order, warehouses))\n\n # Order with 0 of everything and empty warehouses\n def test_order_nothing_no_inv(self):\n order = { 'apple': 0, 'banana': 0, 'cherries': 0 }\n warehouses = [{ 'name': 'wh1', \n 'inventory': { 'apple': 0, 'banana': 0, 'lemon': 0 }},\n { 'name': 'wh2', \n 'inventory': { 'apple': 0, 'cherries': 0, 'lemon': 0 }}]\n expected = []\n self.assertEqual(expected, inventoryAllocator(order, warehouses))\n\n # Exact match from single warehouse\n def test_exact_match_single(self):\n order = { 'item1': 2, 'item2': 10, 'item3': 12, 'item4': 4, 'item5': 1 }\n warehouses = [{ 'name': 'wh1', \n 'inventory': {'item1': 2, 'item2': 10, 'item3': 12, 'item4': 4, 'item5': 1}}]\n expected = [{ 'wh1': {'item1': 2, 'item2': 10, 'item3': 12, 'item4': 4, 'item5': 1}}]\n self.assertEqual(expected, inventoryAllocator(order, warehouses))\n\n # Exact match from multiple warehouses\n def test_exact_match_multiple(self):\n order = { 'item1': 2, 'item2': 10, 'item3': 12, 'item4': 4, 'item5': 1 }\n 
warehouses = [{ 'name': 'wh1', \n                       'inventory': { 'item2': 2 }},\n                      { 'name': 'wh2', \n                       'inventory': { 'item1': 1, 'item4': 3, 'item5': 1 }},\n                      { 'name': 'wh3', \n                       'inventory': { 'item1': 1, 'item3': 6 }},\n                      { 'name': 'wh4', \n                       'inventory': { 'item2': 5, 'item3': 3}},\n                      { 'name': 'wh5', \n                       'inventory': { 'item2': 3, 'item3': 3, 'item4': 1 }}]\n        expected = [{'wh1': {'item2': 2 }},\n                    {'wh2': {'item1': 1, 'item4': 3, 'item5': 1}},\n                    {'wh3': {'item1': 1, 'item3': 6}},\n                    {'wh4': {'item2': 5, 'item3': 3}},\n                    {'wh5': {'item2': 3, 'item3': 3, 'item4': 1}}]\n\n        self.assertEqual(expected, inventoryAllocator(order, warehouses))\n\n    # Fill from multiple warehouses\n    def test_fill_multiple(self):\n        order = { 'item1': 2, 'item2': 10, 'item3': 12, 'item4': 4, 'item5': 1 }\n        warehouses = [{ 'name': 'wh1', \n                       'inventory': { 'item2': 2, 'item6': 10 }},\n                      { 'name': 'wh2', \n                       'inventory': { 'item1': 1, 'item4': 3, 'item5': 1 }},\n                      { 'name': 'wh3', \n                       'inventory': { 'item1': 1, 'item3': 6, 'item8': 6 }},\n                      { 'name': 'wh4', \n                       'inventory': { 'item2': 5, 'item3': 3}},\n                      { 'name': 'wh5', \n                       'inventory': { 'item2': 3, 'item3': 3, 'item4': 1 }},\n                      { 'name': 'wh6', \n                       'inventory': { 'item1': 2, 'item2': 10, 'item3': 5 }}]\n        expected = [{'wh1': {'item2': 2 }},\n                    {'wh2': {'item1': 1, 'item4': 3, 'item5': 1}},\n                    {'wh3': {'item1': 1, 'item3': 6}},\n                    {'wh4': {'item2': 5, 'item3': 3}},\n                    {'wh5': {'item2': 3, 'item3': 3, 'item4': 1}}]\n\n        self.assertEqual(expected, inventoryAllocator(order, warehouses))\n\n    # Extra warehouses (order fulfilled earlier)\n    def test_extra_warehouses(self):\n        order = { 'item1': 2, 'item2': 10, 'item3': 12, 'item4': 4, 'item5': 1 }\n        warehouses = [{ 'name': 'wh1', \n                       'inventory': { 'item2': 10, 'item3': 10 }},\n                      { 'name': 'wh2', \n                       'inventory': { 'item1': 2, 'item4': 4, 'item5': 1 }},\n                      { 'name': 'wh3', \n                       'inventory': { 'item1': 1, 'item3': 2, 'item8': 6 }},\n                      { 'name': 'wh4', \n                       'inventory': { 'item2': 5, 'item3': 3}},\n                      { 'name': 'wh5', \n                       'inventory': { 'item2': 3, 'item3': 3, 'item4': 1 }},\n                      { 'name': 'wh6', \n                       'inventory': { 'item1': 2, 'item2': 10, 'item3': 5 }}]\n        expected = [{'wh1': {'item2': 10, 'item3': 10 }},\n                    {'wh2': {'item1': 2, 'item4': 4, 'item5': 1}},\n                    {'wh3': {'item3': 2}}]\n\n        self.assertEqual(expected, inventoryAllocator(order, warehouses))\n\n    # Two warehouses with the exact same inventory\n    def test_same_inventory(self):\n        order = { 'item1': 2, 'item2': 10, 'item3': 12}\n        warehouses = [{ 'name': 'wh1', 'inventory': {'item1': 2, 'item2': 10, 'item3': 12}},\n                      { 'name': 'wh2', 'inventory': {'item1': 2, 'item2': 10, 'item3': 12}}]\n        expected = [{ 'wh1': {'item1': 2, 'item2': 10, 'item3': 12}}]\n        self.assertEqual(expected, inventoryAllocator(order, warehouses))\n\n    # Order unfulfilled (inventory quantity off by 1)\n    def test_unfulfilled_off_by_one(self):\n        order = { 'item1': 2, 'item2': 10, 'item3': 12, 'item4': 4, 'item5': 1 }\n        warehouses = [{ 'name': 'wh1', \n                    
'inventory': { 'item2': 2, 'item6': 10 }},\n { 'name': 'wh2', \n 'inventory': { 'item1': 1, 'item4': 3, 'item6': 1 }},\n { 'name': 'wh3', \n 'inventory': { 'item1': 1, 'item3': 6, 'item8': 6 }},\n { 'name': 'wh4', \n 'inventory': { 'item2': 5, 'item3': 3 }},\n { 'name': 'wh5', \n 'inventory': { 'item2': 3, 'item3': 3 }},\n { 'name': 'wh6', \n 'inventory': { 'item10': 2, 'item12': 10, 'item7': 5 }}]\n expected = []\n\n self.assertEqual(expected, inventoryAllocator(order, warehouses))\n\n # Order unfulfilled (none of the warehouses have the items)\n def test_unfulfilled_missing_all(self):\n order = { 'item50': 2, 'item51': 10, 'item52': 12, 'item53': 4, 'item54': 1 }\n warehouses = [{ 'name': 'wh1', \n 'inventory': { 'item2': 2, 'item6': 10 }},\n { 'name': 'wh2', \n 'inventory': { 'item1': 1, 'item4': 3, 'item5': 1 }},\n { 'name': 'wh3', \n 'inventory': { 'item1': 1, 'item3': 6, 'item8': 6 }},\n { 'name': 'wh4', \n 'inventory': { 'item2': 5, 'item3': 3 }},\n { 'name': 'wh5', \n 'inventory': { 'item2': 3, 'item3': 3 }},\n { 'name': 'wh6', \n 'inventory': { 'item10': 2, 'item12': 10, 'item7': 5 }}]\n expected = []\n\n self.assertEqual(expected, inventoryAllocator(order, warehouses))\n\n # Big test\n def test_big(self):\n order = { 'item1': 2, 'item2': 10, 'item3': 12, 'item4': 4, 'item5': 1,\n 'item6': 4, 'item7': 6, 'item8': 17, 'item9': 24, 'item10': 5 }\n\n warehouses = [ { 'name': 'wh1', \n 'inventory': { 'item1': 2, 'item2': 5, 'item10': 4, 'item12': 20}},\n { 'name': 'wh2', \n 'inventory': { 'item1': 1, 'item4': 3, 'item5': 1 }},\n { 'name': 'wh3', \n 'inventory': { 'item1': 1, 'item5': 6}},\n { 'name': 'wh4', \n 'inventory': { 'item2': 8, 'item3': 18}},\n { 'name': 'wh5', \n 'inventory': { 'item2': 3, 'item3': 3, 'item4': 1 }},\n { 'name': 'wh6', \n 'inventory': { 'item8': 2, 'item9': 10, 'item10': 1 }},\n { 'name': 'wh7', \n 'inventory': { 'item6': 3, 'item8': 5, 'item9': 7 }},\n { 'name': 'wh8', \n 'inventory': { 'item5': 3, 'item7': 3, 'item10': 1 }},\n { 'name': 'wh9', \n 'inventory': { 'item25': 3, 'item3': 3, 'item4': 1 }},\n { 'name': 'wh10', \n 'inventory': { 'item6': 1, 'item7': 3, 'item8': 15, 'item9': 42 }},\n { 'name': 'wh10', \n 'inventory': { 'item22': 1, 'item46': 3, 'item10': 3}}]\n\n expected = [{'wh1': {'item1': 2, 'item2': 5, 'item10': 4}},\n {'wh2': {'item4': 3, 'item5': 1}},\n {'wh4': {'item2': 5, 'item3': 12}},\n {'wh5': {'item4': 1}},\n {'wh6': {'item8': 2, 'item9': 10, 'item10': 1}},\n {'wh7': {'item6': 3, 'item8': 5, 'item9': 7}},\n {'wh8': {'item7': 3}},\n {'wh10': {'item6': 1, 'item7': 3, 'item8': 10, 'item9': 7}}]\n\n self.assertEqual(expected, inventoryAllocator(order, warehouses))\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"inventory-allocator/src/inventoryAllocatorTests.py","file_name":"inventoryAllocatorTests.py","file_ext":"py","file_size_in_byte":12707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"608888959","text":"# coding: utf-8\n\nfrom setuptools import setup, find_packages\n\nimport mangasproject\n\ntry:\n with open('requirements.txt') as f:\n requirements = [l for l in f.read().splitlines() if l]\n\n with open('README.md') as readme:\n long_description = readme.read()\nexcept IOError:\n long_description = mangasproject.__description__\n requirements = [\n 'requests',\n 'tabulate',\n ]\n\nSETUP_ARGS = {\n 'name': mangasproject.__title__,\n 'description': mangasproject.__description__,\n 'long_description': long_description,\n 'version': mangasproject.__version__,\n 'author': 
mangasproject.__author__,\n 'url': mangasproject.__url__,\n 'keywords': 'manga, downloader, mangásPROJECT',\n 'include_package_data': True,\n 'zip_safe': False,\n 'license': 'Apache-2.0',\n 'platforms': 'any',\n 'install_requires': requirements,\n 'classifiers': [\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n ],\n 'entry_points': {\n 'console_scripts': [\n 'mangasproject = mangasproject.__main__:main'\n ]\n },\n 'packages': find_packages(),\n}\n\nif __name__ == '__main__':\n setup(**SETUP_ARGS)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"615373852","text":"class table:\n\n def __init__(self, no_result_msg=\"\"):\n self.col_len = -1\n self.max_lens = []\n self.header = []\n self.rows = []\n self.no_result_msg = no_result_msg\n\n def append(self, row, header=False):\n ret = False\n row_len = len(row)\n\n if row_len > 0:\n #Check if first row entered\n if self.col_len == -1:\n self.col_len = row_len\n\n #Init list that keep track of the string lengths entered\n #to later determine the format of the table\n for i in range(row_len):\n self.max_lens.append(0)\n\n #Validate that the user is keeping the row format\n if self.col_len != row_len:\n print(\"Invalid row length:\", self.col_len, \"!=\", row_len)\n else:\n #Check for possible maximum string length\n for i in range(row_len):\n if len(row[i]) > self.max_lens[i]:\n self.max_lens[i] = len(row[i])\n\n if header:\n self.header.clear()\n self.header.append(row)\n else:\n self.rows.append(row)\n\n ret = True\n else:\n print(\"Don't be silly, a row is not allowed to be empty. 
\"\n \"This is not a linebreak generator.\")\n\n return ret\n\n def print(self):\n if self.rows:\n format_str = \"\"\n #Concatenate format string\n for i in range(len(self.max_lens)):\n format_str += \"{\" + str(i) + \":\" + str(self.max_lens[i]) + \\\n \"}\\t\"\n\n #Add header to the rows\n total = self.header + self.rows\n\n #Print rows\n for row in total:\n print(format_str.format(*row))\n else:\n print(self.no_result_msg)\n","sub_path":"table.py","file_name":"table.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"45889311","text":"\"\"\"CS1.3 Module 4: Base Conversions\"\"\"\n\ndef binary_to_decimal(binary_str):\n \"\"\"returns the decimal number for the given binary digits\"\"\"\n decimal = 0\n pow_of_2 = 0\n\n binary_dict = {'0': 0, '1': 1}\n\n while binary_str:\n binary_digit = binary_dict[binary_str[-1]]\n decimal += binary_digit * (2 ** pow_of_2)\n binary_str = binary_str[:-1]\n pow_of_2 += 1\n\n return decimal\n\nassert binary_to_decimal('1011') == 11\nassert binary_to_decimal('00000') == 0\nassert binary_to_decimal('00001') == 1\nassert binary_to_decimal('11111') == 31\n\nprint(\"`binary_to_decimal` tests passed\")\n\ndef decimal_to_binary(decimal_num):\n \"\"\"the binary representation of the given decimal number\"\"\"\n # take my decimal number\n # keep dividing by base (2): loop?\n # I want to record the remainders some how: maybe store in a list?\n # I want to stop when my thing I'm dviding by is less than the\n # base: conditional or loop stopper: while loop?\n\n # special case for 0 and 1\n\n if decimal_num in range(2):\n return str(decimal_num)\n\n remainders = []\n\n while decimal_num > 0:\n\n remainder = decimal_num % 2\n remainders.append(str(remainder))\n # print(\"Remainder: \", remainder)\n\n decimal_num = decimal_num // 2\n # print(\"Decimal: \", decimal_num)\n\n # print(\"\".join(remainders))\n return \"\".join(remainders)[::-1]\n\nassert decimal_to_binary(0) == '0'\nassert decimal_to_binary(1) == '1'\nassert decimal_to_binary(2) == '10'\nassert decimal_to_binary(55) == '110111'\nassert decimal_to_binary(389) == '110000101'\n\nprint(\"`decimal_to_binary` tests passed\")\n\n# STRETCH: can you use what you have written so far to\n# create hex_to_decimal() and decimal_to_hex() functions?\n\ndef hex_to_decimal(hex_str):\n \"\"\"returns the decimal number for the given hex digits\"\"\"\n decimal = 0\n pow_of_16 = 0\n\n hex_dict = {\n '0': 0,\n '1': 1,\n '2': 2,\n '3': 3,\n '4': 4,\n '5': 5,\n '6': 6,\n '7': 7,\n '8': 8,\n '9': 9,\n 'A': 10,\n 'B': 11,\n 'C': 12,\n 'D': 13,\n 'E': 14,\n 'F': 15,\n }\n\n while hex_str:\n hex_digit = hex_dict[hex_str[-1]]\n decimal += hex_digit * (16 ** pow_of_16)\n hex_str = hex_str[:-1]\n pow_of_16 += 1\n\n return decimal\n\nassert hex_to_decimal('B') == 11\nassert hex_to_decimal('2F9B') == 12187\nassert hex_to_decimal('FF') == 255\nassert hex_to_decimal('1F') == 31\n\nprint(\"`hex_to_decimal` tests passed\")\n\ndef decimal_to_hex(decimal_num):\n \"\"\"the binary representation of the given decimal number\"\"\"\n # take my decimal number\n # keep dividing by base (2): loop?\n # I want to record the remainders some how: maybe store in a list?\n # I want to stop when my thing I'm dviding by is less than the\n # base: conditional or loop stopper: while loop?\n\n # special case for 0 and 1\n\n hex_dict = {\n 0: '0',\n 1: '1',\n 2: '2',\n 3: '3',\n 4: '4',\n 5: '5',\n 6: '6',\n 7: '7',\n 8: '8',\n 9: '9',\n 10: 'A',\n 11: 'B',\n 12: 'C',\n 13: 
'D',\n        14: 'E',\n        15: 'F',\n    }\n\n    if decimal_num in range(16):\n        return hex_dict[decimal_num]\n\n    remainders = []\n\n    while decimal_num > 0:\n        remainder = decimal_num % 16\n        remainders.append(hex_dict[remainder])\n        # print(\"Remainder: \", remainder)\n\n        decimal_num = decimal_num // 16\n        # print(\"Decimal: \", decimal_num)\n\n    # print(\"\".join(remainders))\n    return \"\".join(remainders)[::-1]\n\n\nassert decimal_to_hex(1096) == '448'\nassert decimal_to_hex(4096) == '1000'\nassert decimal_to_hex(652) == '28C'\nassert decimal_to_hex(255) == 'FF'\nassert decimal_to_hex(2_147_483_647) == '7FFFFFFF'\nassert decimal_to_hex(5) == '5'\nassert decimal_to_hex(15) == 'F'\nassert decimal_to_hex(0) == '0'\n\nprint(\"`decimal_to_hex` tests passed\")\n","sub_path":"base_conversions.py","file_name":"base_conversions.py","file_ext":"py","file_size_in_byte":3933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"351168222","text":"import sys\nimport cv2\nimport numpy as np\n\nimg_names = ['image/img1.jpg','image/img2.jpg','image/img3.jpg']\nimgs = []\nfor name in img_names:\n    img = cv2.imread(name)\n\n    if img is None:\n        print(\"Image load failed\")\n        sys.exit()\n\n    imgs.append(img)\n\n\nstitcher = cv2.Stitcher_create()\nret, pano = stitcher.stitch(imgs)\n\nif ret == cv2.STITCHER_OK:\n    cv2.imshow('pano', pano)\n    cv2.waitKey()\n    cv2.destroyAllWindows()\nelse:\n    print(\"Stitching failed\")\n    sys.exit()\n","sub_path":"08.특징점 검출과 매칭/7.stitching.py","file_name":"7.stitching.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"296803945","text":"import os\nimport sys\n\nfrom direct.showbase.ShowBase import ShowBase\nimport panda3d.core as p3d\n\nimport simplepbr\n\nimport gltf\n\np3d.load_prc_file_data(\n    __file__,\n    'window-size 1024 768\\n'\n)\n\n\nclass App(ShowBase):\n    def __init__(self):\n        if len(sys.argv) < 2:\n            print(\"Missing input file\")\n            sys.exit(1)\n\n        super().__init__()\n\n        simplepbr.init()\n\n        gltf.patch_loader(self.loader)\n\n        infile = p3d.Filename.from_os_specific(os.path.abspath(sys.argv[1]))\n        p3d.get_model_path().prepend_directory(infile.get_dirname())\n\n        self.model_root = self.loader.load_model(infile, noCache=True)\n\n        self.accept('escape', sys.exit)\n        self.accept('q', sys.exit)\n        self.accept('w', self.toggle_wireframe)\n        self.accept('t', self.toggle_texture)\n        self.accept('shift-l', self.model_root.ls)\n        self.accept('shift-a', self.model_root.analyze)\n\n        if not self.model_root.find('**/+Light'):\n            self.light = self.render.attach_new_node(p3d.PointLight('light'))\n            self.light.set_pos(-5, 5, 5)\n            self.render.set_light(self.light)\n\n        self.cam.set_pos(-6, 6, 6)\n        self.cam.look_at(self.model_root)\n\n        self.model_root.reparent_to(self.render)\n\n        if self.model_root.find('**/+Character'):\n            self.anims = p3d.AnimControlCollection()\n            p3d.autoBind(self.model_root.node(), self.anims, ~0)\n            if self.anims.get_num_anims() > 0:\n                self.anims.get_anim(0).loop(True)\n\ndef main():\n    App().run()\n\nif __name__ == '__main__':\n    main()\n","sub_path":"gltf/viewer.py","file_name":"viewer.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"204717815","text":"from json import dumps\n\nportal = context.getPortalObject()\n\ncomment_list = []\nif not follow_up:\n  return dumps(comment_list)\n\n# get the follow up object\nobject_list = 
portal.portal_catalog(relative_url=follow_up)  # portal_catalog returns a sequence; a relative_url query yields at most one element.\nif object_list:\n  follow_up_object = object_list[0].getObject()\nelse:\n  raise NotImplementedError(follow_up)\n\n# get all the published HTML Posts related to this follow up object, oldest first\npost_list = portal.portal_catalog(portal_type=\"HTML Post\", strict_follow_up_uid=follow_up_object.getUid(), sort_on=(('modification_date', 'ascending'),), validation_state=\"published\")\n\npreferred_date_order = portal.portal_preferences.getPreferredDateOrder()\n\ndef format_date(date):\n  # XXX modification date & creation date are still in server timezone.\n  # See merge request !17\n  #\n  # if default_time_zone:\n  #   date = date.toZone(default_time_zone)\n  if preferred_date_order == 'dmy':\n    return \"%s/%s/%s   %s\" % (date.dd(), date.mm(), date.year(), date.TimeMinutes())\n  if preferred_date_order == 'mdy':\n    return \"%s/%s/%s   %s\" % (date.mm(), date.dd(), date.year(), date.TimeMinutes())\n  # ymd\n  return \"%s/%s/%s   %s\" % (date.year(), date.mm(), date.dd(), date.TimeMinutes())\n\nfor post in post_list:\n  owner = post.Base_getOwnerTitle()\n  time_stamp = format_date(post.getStartDate())\n  content = post.getTextContent()\n  successor_list = post.getSuccessorValueList()\n  successor_name = successor_link = None\n  if successor_list:\n    successor_link, successor_name = successor_list[0].getRelativeUrl(), successor_list[0].getFilename()\n\n  comment_list.append((owner, time_stamp, content, successor_link, successor_name))\n\nreturn dumps(comment_list)\n","sub_path":"bt5/erp5_post/SkinTemplateItem/portal_skins/erp5_post/PostModule_getAscendingRelatedPostListAsJson.py","file_name":"PostModule_getAscendingRelatedPostListAsJson.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"496042894","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\nimport sklearn.metrics as skmetrics\n\nfrom ..base import BaseDetailedVisualization, BaseVisualization\nfrom ..utils import pil_loader\n\n\nclass ConfusionMatrix(BaseVisualization):\n    def __init__(self, cm, labels, file, **kwargs):\n        super().__init__(cm, labels, file, **kwargs)\n\n    def plot(self):\n        # get the confusion matrix\n        df_cm = pd.DataFrame(self.cm, index=self.labels, columns=self.labels)\n\n        # plot confusion matrix\n        figure = plt.figure()\n        ax = plt.axes()\n        ax.set_title(\"Confusion matrix of predictions\")\n        sns.set(font_scale=1)\n        sns.heatmap(df_cm, cmap=\"YlGnBu\", ax=ax, annot=True)\n        plt.yticks(rotation=0)\n\n        figure.savefig(self.output_file, format=\"png\")\n\n        return figure\n\n\nclass PrecisionRecallCurve(BaseDetailedVisualization):\n    def plot(self):\n        \"\"\"Generate plot\"\"\"\n        figure = plt.figure()\n\n        # plot pr curve for each class\n        for i in range(len(self.labels)):\n            filtered_true = [1 if x == i else 0 for x in self.y_true]\n            filtered_pred = [1 if x == i else 0 for x in self.y_pred]\n            precision, recall, _ = skmetrics.precision_recall_curve(\n                filtered_true, filtered_pred\n            )\n            plt.plot(recall, precision, lw=2, label=self.labels[i])\n\n        plt.xlabel(\"Recall\")\n        plt.ylabel(\"Precision\")\n        plt.legend(loc=\"best\")\n        plt.title(\"precision vs. 
recall curve\")\n\n        figure.savefig(self.output_file, format=\"png\")\n\n        return figure\n\n\nclass ImageLabelsVisualization(BaseDetailedVisualization):\n    def __init__(self, y_true, y_pred, y_prob, labels, file, **kwargs):\n        super().__init__(y_true, y_pred, y_prob, labels, file, **kwargs)\n        self.image = kwargs.get(\"image\")\n\n    def plot(self):\n        image = pil_loader(self.image)\n        fig = self.plot_prediction(image, self.y_prob, self.labels)\n        fig.savefig(self.output_file, format=\"png\")\n\n    def plot_prediction(self, img, probs, classes):\n        \"\"\"Display image and predictions from model\"\"\"\n\n        # Convert results to dataframe for plotting\n        result = pd.DataFrame({\"p\": probs}, index=classes)\n\n        # Show the image\n        fig = plt.figure(figsize=(16, 5))\n        ax = plt.subplot(1, 2, 1)\n        ax.imshow(img)\n\n        # The title is left blank; the actual class is not passed to this helper\n        ax.set_title(\"\", size=20)\n\n        ax = plt.subplot(1, 2, 2)\n        # Plot a bar plot of predictions\n        result.sort_values(\"p\")[\"p\"].plot.barh(color=\"blue\", edgecolor=\"k\", ax=ax)\n        plt.xlabel(\"Predicted Probability\")\n        plt.tight_layout()\n\n        return fig\n\n\ndef confusion_matrix(cm, labels, output_file):\n    \"\"\"Wrapper for easier usage\"\"\"\n    viz = ConfusionMatrix(cm, labels, output_file)\n    return viz.plot()\n\n\ndef display_image_labels(image, y_true, y_pred, y_prob, labels, output_file):\n    viz = ImageLabelsVisualization(\n        y_true, y_pred, y_prob, labels, output_file, image=image\n    )\n    viz.plot()\n\n\ndef precision_recall_curve(y_true, y_pred, y_prob, labels, output_file):\n    viz = PrecisionRecallCurve(y_true, y_pred, y_prob, labels, output_file)\n    viz.plot()\n","sub_path":"aitlas/visualizations/classification.py","file_name":"classification.py","file_ext":"py","file_size_in_byte":3231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"226460784","text":"from django.urls import path, include, re_path\nfrom monitor import views\n\nurlpatterns = [\n    re_path(r'^index/$', views.index, name='sys_data_index'),\n    re_path(r'^system/(?P.+)/(?P\\d+)/$', views.host_info, name='host_info'),\n    re_path(r'^get/cpu/(?P.+)/(?P\\d+)/$', views.get_cpu, name='get_cpu'),\n    re_path(r'^get/mem/(?P.+)/(?P\\d+)/$', views.get_mem, name='get_mem'),\n    # url(r'^get/pro/mem/(?P.+)/(?P\\d+)/$', views.get_pro_mem, name='get_pro_mem'),\n    re_path(r'^get/disk/(?P.+)/(?P\\d+)/$', views.get_disk, name='get_disk'),\n    re_path(r'^get/net/(?P.+)/(?P\\d+)/$', views.get_net, name='get_net'),\n]\n","sub_path":"monitor/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"651839207","text":"import json\nimport threading\n\nimport requests\n\ndef get_json_worker( food_id, food_json ):\n\t\"\"\"Gets the desired JSON of the food object with id 'food_id' and then\n\tappends the JSON to the food_json list.\n\n\tUsed as a worker in order to get food JSON with threading.\n\t\"\"\"\n\t# same code as a GET for a Food object\n\t# hint: that's exactly what it is.\n\tid_string = str(food_id)\n\turl = 'http://localhost:7474/db/data/node/' + id_string\n\theaders = { 'Content-Type': 'application/json', \n\t\t\t\t'Accept': 'application/json' }\n\tr = requests.get(url, headers=headers)\n\tjson_string = r.json()\n\tname = json_string[\"data\"][\"name\"]\n\tdescription = json_string[\"data\"][\"description\"]\n\timage_url = json_string[\"data\"][\"image_url\"]\n\tpayload = {\"id\": food_id, \"name\": name, \"image_url\": image_url,\n\t\t\t\t\"description\": description 
}\n\tfood_json.append( payload )\n\ndef get_json_master( food_list ):\n\t\"\"\"Gets the JSON for all food IDs in the food_list, collecting each\n\tresult in the food_json list, and returns that list.\n\t\"\"\"\n\tthreads = []\n\tfood_json = []\n\tfor food_id in food_list:\n\t\ttry:\n\t\t\tt = threading.Thread(target=get_json_worker, args=(food_id, food_json))\n\t\t\tthreads.append( t )\n\t\t\tt.start()\n\t\texcept Exception:\n\t\t\tpass\n\t# wait for all the threads to finish\n\tfor thread in threads:\n\t\tthread.join()\n\n\treturn food_json\n","sub_path":"maestro/utils/recommendation_helpers.py","file_name":"recommendation_helpers.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"259936022","text":"#!/usr/bin/env python3\n\n# Import essential libraries\nimport requests\nimport numpy as np\nimport mediapipe as mp\nimport math\nimport time\nimport serial\nimport cv2\n\nmp_drawing = mp.solutions.drawing_utils\nmp_drawing_styles = mp.solutions.drawing_styles\nmp_hands = mp.solutions.hands\n\n# Replace the below URL with your own. Make sure to add \"/shot.jpg\" at the end.\nuseDefaultAddress = True\nurl_start = \"http://\"\nurl_end = \"/shot.jpg\"\n\nif useDefaultAddress:\n    url = url_start + \"192.168.1.124:8080\" + url_end\nelse:\n    url = url_start + input('Enter full ip address including port: ') + url_end\n\n\nser = serial.Serial('/dev/ttyUSB0', 115200, timeout=1)\nser.reset_input_buffer()\n\nimgRes = [float(640), float(480)]\nzDefault = 300\nxScaling, yScaling = 0.5, 0.5\n\n# For static images:\nIMAGE_FILES = []\nwith mp_hands.Hands(\n    static_image_mode=False,\n    max_num_hands=2,\n    min_detection_confidence=0.5) as hands:\n  for idx, file in enumerate(IMAGE_FILES):\n    # Read an image, flip it around y-axis for correct handedness output (see\n    # above).\n    image = cv2.flip(cv2.imread(file), 1)\n    # Convert the BGR image to RGB before processing.\n    results = hands.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n\n    # Print handedness and draw hand landmarks on the image.\n    print('Handedness:', results.multi_handedness)\n    if not results.multi_hand_landmarks:\n      continue\n    image_height, image_width, _ = image.shape\n    annotated_image = image.copy()\n    for hand_landmarks in results.multi_hand_landmarks:\n      print('hand_landmarks:', hand_landmarks)\n      print(\n          f'Index finger tip coordinates: (',\n          f'{hand_landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_TIP].x * image_width}, '\n          f'{hand_landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_TIP].y * image_height})'\n      )\n      mp_drawing.draw_landmarks(\n          annotated_image,\n          hand_landmarks,\n          mp_hands.HAND_CONNECTIONS,\n          mp_drawing_styles.get_default_hand_landmarks_style(),\n          mp_drawing_styles.get_default_hand_connections_style())\n    cv2.imwrite(\n        '/tmp/annotated_image' + str(idx) + '.png', cv2.flip(annotated_image, 1))\n    # Draw hand world landmarks.\n    if not results.multi_hand_world_landmarks:\n      continue\n    for hand_world_landmarks in results.multi_hand_world_landmarks:\n      mp_drawing.plot_landmarks(\n        hand_world_landmarks, mp_hands.HAND_CONNECTIONS, azimuth=5)\n\n\nwith mp_hands.Hands(\n    model_complexity=0,\n    min_detection_confidence=0.5,\n    min_tracking_confidence=0.5) as hands:\n\n    handPoints = []\n    data = None\n    start_time = time.time()\n    while True:\n        img_resp = requests.get(url)\n        img_arr = np.array(bytearray(img_resp.content), dtype=np.uint8)\n        image = cv2.imdecode(img_arr, -1)\n        image = cv2.flip(image, 1)\n        # To improve performance, optionally mark 
the image as not writeable to\n        # pass by reference.\n        image.flags.writeable = False\n        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n        results = hands.process(image)\n\n        # Draw the hand annotations on the image.\n        image.flags.writeable = True\n        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n        if results.multi_hand_landmarks:\n            for hand_landmarks in results.multi_hand_landmarks:\n                pointPos = [xScaling*(hand_landmarks.landmark[9].x*imgRes[0]-imgRes[0]*0.5), yScaling*(imgRes[1]-imgRes[1]*hand_landmarks.landmark[9].y),\n                            zDefault-math.sqrt(pow(abs(hand_landmarks.landmark[0].y*imgRes[1]-hand_landmarks.landmark[5].y*imgRes[1]), 2)+pow(abs(hand_landmarks.landmark[0].x*imgRes[0]-hand_landmarks.landmark[5].x*imgRes[0]), 2))\n                            ]\n                print(\" x:\", pointPos[0], \" y:\", pointPos[1], \" z:\", pointPos[2], sep='')\n                data = str(pointPos[0])+\":\"+str(pointPos[1])+\":\"+str(pointPos[2])+\"\\n\"\n                # cv2.circle(image, (int(pointPos[0]/xScaling+imgRes[0]*0.5), int(imgRes[1]/yScaling-pointPos[1])), 10, (0, 0, 0), -1)\n                cv2.circle(image, (int(hand_landmarks.landmark[9].x*image.shape[1]), int(hand_landmarks.landmark[9].y*image.shape[0])), 10, (0, 0, 0), -1)\n\n                # mp_drawing.draw_landmarks(\n                #     image,\n                #     hand_landmarks,\n                #     mp_hands.HAND_CONNECTIONS,\n                #     mp_drawing_styles.get_default_hand_landmarks_style(),\n                #     mp_drawing_styles.get_default_hand_connections_style())\n        fps = int(round(1 / (time.time()-start_time)))\n        print(\"fps:\", fps, end='')\n        # print()\n        # only write after a hand has been seen at least once; data keeps its last value otherwise\n        if data is not None:\n            ser.write(data.encode('utf-8'))\n        cv2.imshow('MediaPipe Hands', cv2.resize(image, None, fx=1, fy=1))\n        if cv2.waitKey(1) == 27:\n            break\n        start_time = time.time()\ncv2.destroyAllWindows()\n","sub_path":"teststuff/python/serialCOM tracking/mediapipe_serialCOM.py","file_name":"mediapipe_serialCOM.py","file_ext":"py","file_size_in_byte":4889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"179698131","text":"from board.bit_board import BitBoard\n\nclass Test:\n    pass\n\nif __name__ == '__main__':\n    b = BitBoard()\n    print('init test')\n    b.init_board('init/init.csv')\n    print('display test')\n    b.display_board()\n    b.put_stone(2, 4, 1)\n    for i in range(10):\n        for j in range(10):\n            print(b.get_liberty(i,j), end='')\n        print('')\n    b.display_board()\n    print(b.count_stone(1))\n    print(b.get_stone(2,4,1))\n    for i in range(10):\n        for j in range(10):\n            print(b.get_liberty(i,j), end='')\n        print('')\n","sub_path":"test/test_bit_board.py","file_name":"test_bit_board.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"255398288","text":"import pygame\r\nfrom pygame.sprite import Sprite\r\n\r\nclass Bullet(Sprite):\r\n    '''A class to manage bullets fired by the ship'''\r\n\r\n    def __init__(self, ai_game):\r\n        \"\"\"Create a bullet object at the ship's current position\"\"\"\r\n        super().__init__() #inherits from Sprite\r\n        self.screen = ai_game.screen\r\n        self.settings = ai_game.settings\r\n        self.color = self.settings.bullet_color\r\n\r\n        #create the bullet rect at (0,0), then set its correct position\r\n        self.rect = pygame.Rect(0,0, self.settings.bullet_width, self.settings.bullet_height)\r\n        self.rect.midtop = ai_game.ship.rect.midtop\r\n\r\n        #the bullet position is stored as a float value\r\n        self.y = float(self.rect.y)\r\n        self.x = float(self.rect.x)\r\n\r\n    def update(self):\r\n        \"\"\"Move the bullet up the screen\"\"\"\r\n        #update the bullet position\r\n        self.y -= self.settings.bullet_speed\r\n        #update the position of the bullet rect\r\n        self.rect.y = self.y\r\n\r\n    def draw_bullet(self):\r\n        \"\"\"Draw the bullet on the screen\"\"\"\r\n        pygame.draw.rect(self.screen, self.color, self.rect)\r\n\r\n","sub_path":"bullet.py","file_name":"bullet.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"392942565","text":"# -*- coding: utf-8 -*-\n\nimport sys\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\nfrom common import public\nimport re\n\n\nclass ddb_gzjfw_zhaobiao():\n    need_check_ziduan = [u'title',\n                         u'city',\n                         u'pubdate',\n                         u'data_sources',\n                         u'bidwinning_pubdate'\n                         ]\n\n    def check_title(self, source, ustr):\n        \"\"\"title check\"\"\"\n        ret = None\n        if ustr and len(ustr):\n            if any(c in u')(' for c in ustr):\n                ret = u'有特殊符号'\n        return ret\n\n    def check_city(self, source, ustr):\n        \"\"\"city check\"\"\"\n        ret = None\n        if ustr and len(ustr):\n            if ustr != u'贵州':\n                ret = u\"city不为贵州\"\n        return ret\n\n    def check_pubdate(self, source, ustr):\n        \"\"\"publication date check\"\"\"\n        ret = None\n        if ustr and len(ustr):\n            if not public.date_format(ustr):\n                ret = u\"不合法日期\"\n        return ret\n\n    def check_data_sources(self, source, ustr):\n        \"\"\"data source check\"\"\"\n        ret = None\n        if ustr and len(ustr):\n            if ustr != u'贵州公共资源交易中心':\n                ret = u\"需要等于贵州公共资源交易中心\"\n        return ret\n\n    def check_bidwinning_pubdate(self, source, ustr):\n        \"\"\"bid-winning publication date check\"\"\"\n        ret = None\n        if ustr and len(ustr):\n            if not public.date_format(ustr):\n                ret = u\"不合法日期\"\n        return ret\n","sub_path":"src/parse/ddb_gzjfw_zhaobiao.py","file_name":"ddb_gzjfw_zhaobiao.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"624702988","text":"# -*- coding: utf8 -*-\n\nfrom gym import make, Wrapper\n\nclass Environment(Wrapper):\n    \"\"\"Wrapper for an OpenAI Gym environment.\"\"\"\n\n    changeable_parameters = []\n\n    def __init__(self, name, add_at_iteration=0, change_variables=\"all\", **kwargs):\n        super(Environment, self).__init__(make(name))\n        self.name = name\n        self.add_at_iteration = add_at_iteration\n        self.args = kwargs\n        self.change_variables = change_variables\n\n    def to_dict(self):\n        \"\"\"\n        Extract the name and other important aspects of the environment.\n        By default, these include the changeable parameters.\n        \"\"\"\n        d = {\"name\": self.name}\n        for p in self.changeable_parameters:\n            d[p[\"name\"]] = self.env.env.__getattribute__(p[\"name\"])\n        return d\n","sub_path":"environment/environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"142886845","text":"from django.test import TestCase\n\nfrom rdmo.core.testing.mixins import TestImportManageMixin\n\n\nclass ViewsManageTestCase(TestCase):\n\n    fixtures = (\n        'users.json',\n        'groups.json',\n        'accounts.json',\n        'conditions.json',\n        'domain.json',\n        'options.json',\n        'views.json',\n    )\n\n\nclass ViewsImportManageTests(TestImportManageMixin, ViewsManageTestCase):\n\n    import_file = 'testing/xml/views.xml'\n","sub_path":"rdmo/views/tests/test_manage.py","file_name":"test_manage.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"644279075","text":"from setuptools import find_packages, setup\n\nwith 
open(\"requirements.txt\") as fd:\n    install_requires = fd.read().splitlines()\n\nsetup(\n    name=\"reef\",\n    version=\"0.0.3\",\n    url=\"https://srinivasreddy.dev\",\n    license=\"BSD\",\n    maintainer=\"Srinivas Reddy Thatiparthy\",\n    maintainer_email=\"thatiparthysreenivas@gmail.com\",\n    description=\" A miniproject to test python and remote development skills.\",\n    packages=find_packages(),\n    include_package_data=True,\n    zip_safe=False,\n    install_requires=install_requires,\n)\n","sub_path":"pypi_install_script/reef-0.0.3.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"352919584","text":"import numpy\nfrom matplotlib.pylab import *\ng = 9.81\n\n# E.8 a)\ndef forwardEuler(f, N, T, uk):\n    dt = float(T) / N\n    for i in range(N):\n        tk = i * dt\n        yield [tk, uk]\n        uk = uk + dt * f(uk, tk)\n\nclass FluidBody(object):\n    def __init__(self, sigma, sigma_b, A, V, C_D):\n        self.sigma = sigma\n        self.sigma_b = sigma_b\n        self.A = A\n        self.V = V\n        self.C_D = C_D\n        self.g = g\n\n    def fluid(self, uk, tk):\n        g = self.g\n        sigma = self.sigma\n        sigma_b = self.sigma_b\n        A = self.A\n        V = self.V\n        C_D = self.C_D\n        return -g * (1 - sigma / sigma_b) - 0.5 * C_D * sigma * A / (sigma_b * V) * abs(uk) * uk\n\n\n    def __call__(self, uk, tk):\n        return self.fluid(uk, tk)\n\n# E.8 b)\ndef test_linear():\n    fb = FluidBody(0.0, 1.0, 1.0, 1.0, 1.0)\n    tks = []\n    uks = []\n    ratios = []\n    olduk = 0\n    k = 1.0\n    eps = 1.0E-7\n    allok = True\n    for [tk, uk] in forwardEuler(fb, 100, 10.0, olduk):\n        expected = (olduk - g * k * tk)\n        if not abs(uk - expected) < eps:\n            print(uk, expected)\n            allok = False\n    if allok:\n        print('all good and linear')\n\n# E.8 c)\n# The graph shows F_g and F_b as constant while F_d decreases towards 0.\ndef plot_forces():\n    fb = FluidBody(0.79, 1003.0, 0.9, 0.08, 0.6)\n    sigma = fb.sigma\n    sigma_b = fb.sigma_b\n    A = fb.A\n    V = fb.V\n    C_D = fb.C_D\n    m = sigma_b * V\n    F_g = -m * g\n    F_d = lambda uk, tk: -0.5 * C_D * sigma * A * abs(uk) * uk\n    F_b = sigma * g * V\n    x = []\n    y = []\n    olduk = 50.0\n    N = 100\n    T = 1.0\n    for [tk, uk] in forwardEuler(F_d, N, T, olduk):\n        x.append(tk)\n        y.append(uk)\n    plot(x, y, 'r-')\n    plot(x, [F_g for _ in y], 'g-')\n    plot(x, [F_b for _ in y], 'b-')\n    show()\n\n# E.8 d)\n# The graph shows velocity converging to ~-60 m/s\ndef plot_skydiver():\n    fb = FluidBody(sigma=0.79, sigma_b=1003.0, A=0.9, V=0.08, C_D=0.6)\n    N = 100\n    T = 20.0\n    olduk = 0.0\n    x = []\n    y = []\n    for [tk, uk] in forwardEuler(fb, N, T, olduk):\n        print(tk, uk)\n        x.append(tk)\n        y.append(uk)\n    plot(x, y, 'b-')\n    show()\n\n# E.8 e)\n# The graph shows velocity converging to ~3.64 m/s\ndef plot_ball():\n    r = 0.11\n    m = 0.43\n    V = 4.0/3.0*pi*r**3\n    sigma_b = m / V\n    print(V,sigma_b,pi*r**2)\n    fb = FluidBody(sigma=1000.0, sigma_b=sigma_b, A=pi*r**2, V=V, C_D=0.2)\n    N = 1000\n    T = 1.0\n    olduk = 0.0\n    x = []\n    y = []\n    for [tk, uk] in forwardEuler(fb, N, T, olduk):\n        x.append(tk)\n        y.append(uk)\n    plot(x, y, 'b-')\n    show()\n\ndef main():\n    'entry point'\n    #test_linear()\n    #plot_forces()\n    #plot_skydiver()\n    plot_ball()\n\n    #plot(tks, uks)\n    #show()\n\nif __name__ == '__main__':\n    main()\n","sub_path":"4thCourseS1/Extra/DifferentialEquations/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"107515988","text":"def find_gcd(a, b):\n    while b:\n        a, b = b, a % b\n\n    return a\n\ndef gcd():\n\n    l = (5, 10, 15, 25, 35)\n    num1 = l[0]\n    num2 = l[1]\n    gcd = find_gcd(num1, num2)\n\n    for i in range(2, len(l)):\n        gcd = find_gcd(gcd, l[i])\n    print(gcd)\n\ngcd()\n\n\n","sub_path":"gcdarray.py","file_name":"gcdarray.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"461660510","text":"#!/usr/bin/env python3\n\"\"\" The Viterbi Algorithm \"\"\"\nimport numpy as np\n\n\ndef viterbi(Observation, Emission, Transition, Initial):\n    \"\"\" calculates the most likely sequence of hidden states for a\n    hidden markov model:\n\n    - Observation is a numpy.ndarray of shape (T,) that contains\n      the index of the observation\n        - T is the number of observations\n    - Emission is a numpy.ndarray of shape (N, M) containing the\n      emission probability of a specific observation given a\n      hidden state\n        - Emission[i, j] is the probability of observing j given\n          the hidden state i\n        - N is the number of hidden states\n        - M is the number of all possible observations\n    - Transition is a 2D numpy.ndarray of shape (N, N) containing\n      the transition probabilities\n        - Transition[i, j] is the probability of transitioning\n          from the hidden state i to j\n\n    - Initial is a numpy.ndarray of shape (N, 1) containing the\n      probability of starting in a particular hidden state\n\n    Returns: path, P, or None, None on failure\n    - path is a list of length T containing the most likely\n      sequence of hidden states\n    - P is the probability of obtaining the path sequence\n    \"\"\"\n    if not isinstance(Observation, np.ndarray) or len(Observation.shape) != 1:\n        return None, None\n    T = Observation.shape[0]\n    if not isinstance(Emission, np.ndarray) or len(Emission.shape) != 2:\n        return None, None\n    N, M = Emission.shape\n    if not isinstance(Transition, np.ndarray) or len(Transition.shape) != 2:\n        return None, None\n    if Transition.shape != (N, N):\n        return None, None\n    if not isinstance(Initial, np.ndarray) or len(Initial.shape) != 2:\n        return None, None\n    if Initial.shape != (N, 1):\n        return None, None\n    if not np.sum(Emission, axis=1).all():\n        return None, None\n    if not np.sum(Transition, axis=1).all():\n        return None, None\n    if not np.sum(Initial) == 1:\n        return None, None\n\n    Viterbi = np.empty((N, T))\n    Backpointer = np.empty((N, T))\n\n    Viterbi[:, 0] = Initial.T * Emission[:, Observation[0]]\n    Backpointer[:, 0] = 0\n\n    for i in range(1, T):\n        prob = (Viterbi[:, i - 1] * Transition.T *\n                Emission[np.newaxis, :, Observation[i]].T)\n        Viterbi[:, i] = np.amax(prob, 1)\n        Backpointer[:, i-1] = np.argmax(prob, 1)\n\n    x = [0 for i in range(T)]\n    last_state = np.argmax(Viterbi[:, T - 1])\n    x[0] = last_state\n    index = 1\n    for i in range(T - 2, -1, -1):\n        x[index] = int(Backpointer[int(last_state), i])\n        last_state = Backpointer[int(last_state), i]\n        index += 1\n    x.reverse()\n    P = np.amax(Viterbi, axis=0)\n    P = np.amin(P)\n    return x, P\n","sub_path":"unsupervised_learning/0x02-hmm/4-viterbi.py","file_name":"4-viterbi.py","file_ext":"py","file_size_in_byte":2867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"201436921","text":"# Imports\nimport numpy as np\n\n\n# Non-Linear Filter Class\nclass NLRLS():\n    \"\"\" Non-Linear Recursive Least Squares (NL-RLS). \"\"\"\n\n    # Special Methods\n    def __init__(self, n, m, t, mu=1.0, alpha=0.0, eps=1e-9):\n        \"\"\" Constructor. 
\"\"\"\n # Constants\n self.n = n\n self.mu = mu\n self.alpha = alpha\n self.eps = eps\n # Transformation Function\n self.transform = t\n # Initialise Base\n self.h = np.zeros(n)\n self.u = np.zeros(n)\n self.y = np.zeros(n)\n self.a = 0.0\n self.g = np.zeros(n)\n self.R_ = np.eye(n)/self.eps\n # Initialise Transformed\n self.h_ = np.zeros(m)\n self.u_ = np.zeros(m)\n\n # Filter Functions\n def input(self, u0):\n \"\"\" Update Input and make Prediction. \"\"\"\n # Update Inputs\n self.u[1:] = self.u[0:-2]\n self.u[0] = u0\n # Transform\n self.u_ = self.transform(self.u)\n # Predict\n self.y = np.inner(self.h_, self.u_)\n # Return\n return self.y\n\n def predict(self):\n \"\"\" Predict Filter. \"\"\"\n # Predict\n self.y = np.inner(self.h_, self.u_)\n # Return\n return self.y\n\n def update(self, a):\n \"\"\" Update Filter. \"\"\"\n # Update Coefficients\n self.g = np.dot(self.R_, self.u_)/(self.mu + np.inner(self.u_, np.dot(self.R_, self.u_)))\n self.R_ = (self.R_ - np.dot(self.outer(self.g, self.u_), self.g))/self.mu\n self.h = (1 - self.mu*self.alpha)*self.h_ + self.g*a","sub_path":"df/filters/linear/non_linear/nl_rls.py","file_name":"nl_rls.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"426147243","text":"#!/usr/bin/env python\n\n# expected input:\n# - genomedepthbases\n# e.g. \n# bedtools genomecov -ibam {mapped_bam} | python coverage_histogram plot.html > stats.txt\n\nimport collections\nimport sys\n\nimport plotly\nimport sys\n\nfrom plotly.graph_objs import Scatter, Layout, Bar\n\nc = 0\nt = 0\ncex0 = 0\ntex0 = 0\nh = collections.defaultdict(int)\n\nfor count, l in enumerate(sys.stdin):\n f = l.split('\\t')\n if f[0] != 'genome':\n continue\n depth = int(f[1])\n bases = int(f[2])\n c += depth * bases\n t += bases\n if depth > 0:\n cex0 += depth * bases\n tex0 += bases\n h[depth] += bases\n if count % 10000000 == 0:\n sys.stderr.write('coverage_histogram: processed {} lines\\n'.format(count))\n\nsys.stdout.write('coverage_total: {}\\ncoverage_bases: {}\\ncoverage_mean: {}\\ncoverage_total_no_zero: {}\\ncoverage_mean_no_zero: {}'.format(c, t, 1.0 * c/t, cex0, 1.0 * cex0/tex0))\n\nx = [x for x in range(1,100)]\n\nplotly.offline.plot({\n \"data\": [Scatter(x=x, y=[h[v] for v in x])],\n \"layout\": Layout(title=\"Coverage\", xaxis=dict(title='Coverage'), yaxis=dict(title='Count')),\n },\n filename=sys.argv[1],\n auto_open=False)\n","sub_path":"src/util/coverage_histogram.py","file_name":"coverage_histogram.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"206522620","text":"# Индивидуальное задание\n# Засько Богдан\n# КНИТ16-А\n# Для всех вариантов, №5\n\nimport math\na=30\nwhile True:\n try:\n corner=float(input(\"Введите значение угла: \"))\n if 0= 1].index)\n\n# Nb mesured TIV\nprint(headway[[\"hwt\", \"location\"]].groupby(\"location\").count().T.to_latex())\n\n# Nb < 1 s\nprint(headway_inf1s[[\"hwt\", \"location\"]].groupby(\"location\").count().T.to_latex())\n\n# Proportion TIV < 1 s (~VA)\nprint(headway_inf1s.loc[headway_inf1s.leading_user_type != 7][[\"hwt\", \"location\"]].groupby(\"location\").count().T.to_latex())\n\n# Proportion TIV < 1 s (VA)\nprint(headway_inf1s.loc[headway_inf1s.leading_user_type == 7][[\"hwt\", 
\"location\"]].groupby(\"location\").count().T.to_latex())\n\n","sub_path":"results/performance/headway/hwt-inf-1s.py","file_name":"hwt-inf-1s.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"108056769","text":"from __future__ import annotations\n\nimport fileinput as fi\nimport re\nimport itertools as it\nimport collections\n\nimport heapq\n\nimport attr\nimport typing\n\n\n@attr.s(auto_attribs=True, frozen=False)\nclass Floor:\n gens: typing.FrozenSet[str] = attr.Factory(frozenset)\n chips: typing.FrozenSet[str] = attr.Factory(frozenset)\n\n def valid(self) -> bool:\n return len(self.gens) == 0 or len(self.chips - self.gens) == 0\n\n\n@attr.s(auto_attribs=True, frozen=False, eq=False)\nclass State():\n floors: typing.Tuple[Floor, Floor, Floor, Floor]\n cur: int = 0\n\n\n sig: typing.Optional[typing.Tuple[int, typing.Tuple[typing.Tuple[int, int], ...]]] = None\n def signature(self) -> typing.Tuple[int, typing.Tuple[typing.Tuple[int, int], ...]]:\n if self.sig is not None:\n return self.sig\n\n gens = {}\n chips = {}\n for i, floor in enumerate(self.floors):\n for x in floor.gens:\n gens[x] = i\n\n for y in floor.chips:\n chips[y] = i\n\n sig = tuple(sorted((gens[k], chips[k]) for k in gens.keys()))\n\n self.sig = (self.cur, sig)\n\n return self.sig\n\n def __eq__(self, other) -> bool:\n if not isinstance(other, type(self)): return NotImplemented\n return self.signature() == other.signature()\n # return self.sig == other.sig\n\n def __hash__(self) -> int:\n # This change here made all the difference between it\n return hash(self.signature())\n\n # If the state is valid\n def valid(self) -> bool:\n return 0 <= self.cur < len(self.floors) and all(x.valid() for x in self.floors)\n\n def score(self) -> int:\n f1, f2, f3, f4 = [10*len(x.gens) + len(x.chips) for x in self.floors]\n return 10000000 * f1 + 1000 * f2 + 10 * f3 + (len(self.floors)-1-self.cur)*13\n\n\n def done(self) -> bool:\n if self.cur != len(self.floors)-1:\n return False\n\n rooms_empty = all(len(x.gens) + len(x.chips) == 0 for x in self.floors[:-1])\n return rooms_empty\n\n def pos_next(self) -> typing.List[State]:\n ans = []\n floor_items = self.floors[self.cur]\n\n for l in range(0, 3):\n for jp in range(0, 3-l):\n if l + jp == 0:\n continue\n\n assert(l + jp < 3)\n\n gg = it.combinations(floor_items.gens, l)\n ff = it.combinations(floor_items.chips, jp)\n\n for negens, nechips in it.product(gg, ff):\n egens = frozenset(negens)\n echips = frozenset(nechips)\n\n current_floor = Floor(floor_items.gens - egens, floor_items.chips - echips)\n\n if not current_floor.valid():\n continue\n\n upad = []\n if self.cur != len(self.floors)-1:\n upad.append(1)\n\n if self.cur != 0:\n # Optimization here, where we never go down to an empty floor\n below = self.floors[self.cur-1]\n if len(below.gens) + len(below.chips) > 0:\n upad.append(-1)\n\n\n for n in upad:\n un = self.cur + n\n new_floor = Floor(self.floors[un].gens | egens, self.floors[un].chips | echips)\n if not new_floor.valid():\n continue\n\n wow = list(self.floors)\n wow[self.cur] = current_floor\n wow[un] = new_floor\n SN = State((wow[0], wow[1], wow[2], wow[3]), un)\n\n ans.append(SN)\n\n return ans\n\n\n def __lt__(self, other) -> bool:\n return self.cur > other.cur\n\n def pretty(self) -> str:\n ans = \"\"\n names: typing.Set[str] = set()\n for floor in self.floors:\n names.update([x for x in floor.gens])\n\n ma = {k: v for k, v in zip(\"ABCDEFLX\", sorted(names))}\n\n fxs = 
[]\n        for i, floor in enumerate(self.floors):\n            gens = [x for x in floor.gens]\n            chips = [x for x in floor.chips]\n\n            m = \"F{}\".format(i+1)\n            if i == self.cur:\n                m += \" E\"\n            else:\n                m += \"  \"\n\n            for k, v in ma.items():\n                if v in gens:\n                    m += \" G{}\".format(k)\n                else:\n                    m += \"   \"\n\n                if v in chips:\n                    m += \" M{}\".format(k)\n                else:\n                    m += \"   \"\n\n            fxs.append(m)\n\n        ans += \"\\n\".join(reversed(fxs))\n\n        return ans\n\n\nimport time\n\ndef animate_it(made_it: typing.Dict[State, State], fin: State):\n    states = [fin]\n    while states[-1] in made_it:\n        states.append(made_it[states[-1]])\n\n    rev_states = reversed(states)\n\n    for i, state in enumerate(rev_states):\n        # print(chr(27) + \"[2J\")\n        print(\"\\033c\", end=\"\")\n        print(\"=== STATE {} ===\".format(i))\n        print(state.pretty())\n        time.sleep(0.1)\n\n\n# S is the state\ndef solve_smart(S: State):\n    Q = [(S.score(), 0, S)]\n\n    got_it = collections.defaultdict(lambda: 3000000000000)\n    got_it[S] = 0\n\n    min_find = 10000000000000000000000000000000\n    made_it = {}\n    while len(Q) > 0:\n        _, depth, qs = heapq.heappop(Q)\n        if depth + 1 >= min_find:\n            continue\n\n        new = qs.pos_next()\n        for newq in new:\n            # we keep track of the minimal score to get here\n            if got_it[newq] > (depth + 1):\n                got_it[newq] = depth + 1\n                made_it[newq] = qs\n            else:\n                continue\n\n            if newq.done():\n                if depth + 1 < min_find:\n                    min_find = depth + 1\n                    # print(\"New min_find: {}\".format(min_find))\n                    # animate_it(made_it, newq)\n\n            heapq.heappush(Q, ((depth + 1) * 100 + newq.score(), depth + 1, newq))\n\n    return min_find\n\n\ntest1 = State(\n    (\n        Floor(frozenset([]), frozenset([\"hydrogen\", \"lithium\"])), # F1\n        Floor(frozenset([\"hydrogen\"]), frozenset([])),\n        Floor(frozenset([\"lithium\"]), frozenset([])),\n        Floor(frozenset([]), frozenset([])),\n    ),\n)\n\n\n# The first floor contains a promethium generator and a promethium-compatible microchip.\n# The second floor contains a cobalt generator, a curium generator, a ruthenium generator, and a plutonium generator.\n# The third floor contains a cobalt-compatible microchip, a curium-compatible microchip, a ruthenium-compatible microchip, and a plutonium-compatible microchip.\n# The fourth floor contains nothing relevant.\n\nreal1 = State(\n    (\n        Floor(frozenset([\"promethium\"]), frozenset([\"promethium\"])),\n        Floor(frozenset(((\"cobalt\"), (\"curium\"), (\"ruthenium\"), (\"plutonium\"))), frozenset()),\n        Floor(frozenset(), frozenset(((\"cobalt\"), (\"curium\"), (\"ruthenium\"), (\"plutonium\")))),\n        Floor(frozenset(), frozenset()),\n    )\n)\n\nreal2 = State(\n    (\n        Floor(frozenset((\"elerium\", \"promethium\", \"dilithium\")), frozenset((\"promethium\", \"elerium\", \"dilithium\"))),\n        Floor(frozenset(((\"cobalt\"), (\"curium\"), (\"ruthenium\"), (\"plutonium\"))), frozenset()),\n        Floor(frozenset(), frozenset(((\"cobalt\"), (\"curium\"), (\"ruthenium\"), (\"plutonium\")))),\n        Floor(frozenset(), frozenset()),\n    )\n)\n\n\n# print(solve_smart(test1))\nprint(solve_smart(real1))\n# print(solve_smart(real2))\n","sub_path":"2016/11/y2016_d11_p01.py","file_name":"y2016_d11_p01.py","file_ext":"py","file_size_in_byte":7496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"48005966","text":"\"\"\"\nthis module converts date string to week number and backward.\n\nit has the following functionalities.\n- convert date string to week number\n- convert week number to year month\n\nauthor\n- @ZL 20210821\n\nchangelog\n- v0.01, initial build\n\n\"\"\"\n\nfrom lib.utility.types import 
(\n    datetime, Date, \n    Tuple, logging,\n)\n\ndef date2wkno(date_str:str)->int:\n    \"\"\"return week number from a date string\n\n    Args:\n        date_str (str): date string. for example: '2021-08-21 00:00:00'\n\n    Returns:\n        int: yyww. for example: 2125\n    \"\"\"\n    hyphen = '-'\n    slash = '/'\n    if hyphen in date_str:\n        pfmt = \"%Y-%m-%d %H:%M:%S\"\n    elif slash in date_str:\n        pfmt = \"%Y/%m/%d %H:%M:%S\"\n    else:\n        pfmt = \"%Y %m %d %H:%M:%S\"\n\n    d = datetime.datetime.strptime(date_str, pfmt).date()\n    y = d.year % 100\n    w = d.isocalendar()[1]\n    fmt = \"{}{}\".format(y, w)\n    return int(fmt)\n\ndef wkno2ym(week_number:int, year:int)->str:\n    \"\"\"return short yy mm from week number\n\n    Args:\n        week_number (int): week number. format is 2132. 21 is short for 2021, 32 is short for week number 32\n\n    Returns:\n        AnyStr: yy mm. for example: 21 Aug\n\n    Ref link: https://stackoverflow.com/questions/17087314/get-date-from-week-number\n    \"\"\"\n\n    i = 2 # index that slices week_number: yyww\n    d = 1 # pick monday as first day\n    c = year // 100 * 100 # get first year of century\n\n    cstr = str(week_number)\n    y = int(cstr[:i]) + c\n    w = cstr[i:]\n    fmt = \"{}-{}-{}\".format(y, w, d)\n    d = datetime.datetime.strptime(fmt, \"%Y-%W-%w\") # return yyyy-mm-dd\n    return d.strftime(\"%y %b\") # return short version yy-mm\n\ndef GetBeginEndDateFromCalendarWeek(year:int, calendar_week:int)->Tuple[Date,Date]:\n    \"\"\"return tuple[monday, sunday] from year, calendar week. for example: 2021, 32\n\n    Args:\n        year (int): natural year\n        calendar_week (int): calendar week\n\n    Returns:\n        Tuple[Date,Date]: tuple\n\n    Ref link: https://stackoverflow.com/questions/51194745/get-first-and-last-day-of-given-week-number-in-python\n    \"\"\"\n    monday = datetime.datetime.strptime(f'{year}-{calendar_week}-1', \"%Y-%W-%w\").date()\n    return monday, monday + datetime.timedelta(days=6)\n\nif __name__ == '__main__':\n    today = \"2021/06/27 09:15:32\"\n    logging.info(date2wkno(today))\n\n    week_number = 2132\n    d = wkno2ym(week_number, 1985)\n    logging.info(\"week number('{0}') -> {1}\".format(week_number, d))\n    logging.info(\"start date: {}, end date: {}\".format(*GetBeginEndDateFromCalendarWeek(2021, 1)))","sub_path":"20220107 WW_Pmod_SMLD_Control_System/lib/utility/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"229938359","text":"import os\npath=\"C:/Documents and Settings/cukier_j.OECDMAIN/My Documents/vastChallenge/mc3/MC_3_Materials_4_4_2011\"\nout=open(\"C:/Documents and Settings/cukier_j.OECDMAIN/My Documents/vastChallenge/mc3/list-kw.txt\",'w')\nlisting = os.listdir(path)\n#groups=('network of dread', 'ethical treatment of lab mice', 'brotherhood of antartica', 'anarchists for freedom', 'brotherhood of maintenance of way employees')\nkeywords=('attack', 'threat', 'terror', 'strike', 'explosion', 'bomb', 'danger', 'arson', 'suspicious', 'homeland security')\nfor infile in listing:\n    myfile=open(path+\"/\"+infile, 'r')\n    keeper=False\n    vast=False\n    parse=myfile.readlines()\n    for line in parse:\n        if line.lower().find('vastopolis')>-1:\n            vast=True\n        for kw in keywords:\n            if line.lower().find(kw)>-1:\n                keeper=True\n                break\n    if (keeper and vast):\n        out.write(infile+\"\\t\"+parse[0])\n    myfile.close()\nout.close()\n","sub_path":"VAST challenge/vastMC3_v4.py","file_name":"vastMC3_v4.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"440946565","text":"import matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\nimport h5py \n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-f\", default= \"D:\\\\workspace\\\\diplomka\\\\src\\\\3Dconversion\\\\test_0_0.h5\", type=str, help=\"npz file to load\")\n args = parser.parse_args()\n \n f = h5py.File(args.f)\n print(f)\n index = 0\n labels = np.array(f['label'])\n points = np.array(f['data'])\n soms = np.array(f['som'])\n fig = plt.figure()\n ax = Axes3D(fig)\n #ax.scatter(points[index,:,0], points[index,:,1],points[index,:,2])\n ax.scatter(soms[index,:,0], soms[index,:,1],soms[index,:,2])\n plt.show()","sub_path":"dockers/data_conversion/visualize_points.py","file_name":"visualize_points.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"135814219","text":"from unittest import mock\nfrom Loan.model import *\nfrom Loan.auth import *\nfrom microservice.model import User\nfrom Loan import app\n\nuser = User(id=1, username='Vijay', password='password', address='Chennai', state='Tamilnadu', country='India', email='v@gmail.com', contact=8894574145, dob='21/1/1993', pan=41512563145, accountType='Saving')\ntest = app.test_client()\n\n\n@mock.patch('Loan.view.get_user', return_value=user)\ndef test_get_appliedLoan_200(mocked_object_id):\n response = test.get('/loan/1')\n status = response.status_code\n assert status == 200\n\n\n@mock.patch('Loan.view.get_user', return_value=user)\ndef test_get_appliedLoan_403(mocked_object_id):\n response = test.get('/loan/4')\n status = response.status_code\n assert status == 403\n\n\nloan = {\n \"LoanType\": \"Car Loan\",\n \"loanAmount\": 500000,\n \"Date\": \"21/1/2020\",\n \"RateofInterest\": \"2%.\",\n \"id\": 1,\n \"DurationOfLoan\": 3\n}\n\n\n@mock.patch('Loan.view.get_user', return_value=user)\ndef test_apply_Loan_403(mocked_object_id):\n response = test.post('/loan', json=loan)\n status = response.status_code\n assert status == 403\n\n\n@mock.patch('Loan.view.get_user', return_value=user)\ndef test_apply_Loan_400(mocked_object_id):\n loan[\"LoanType\"] = \"\"\n response = test.post('/loan', json=loan)\n status = response.status_code\n assert status == 400\n","sub_path":"Loan/test_loan.py","file_name":"test_loan.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"236791399","text":"# Copyright 2018-2020 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This module contains functions to construct many-body observables whose expectation\nvalues can be used to simulate molecular properties.\n\"\"\"\n# pylint: disable=too-many-arguments, too-few-public-methods\nimport numpy as np\nfrom openfermion.ops import FermionOperator\nfrom openfermion.transforms import bravyi_kitaev, jordan_wigner\n\nfrom . 
import structure\n\n\ndef _spin2_matrix_elements(sz):\n r\"\"\"Builds the table of matrix elements\n :math:`\\langle \\bm{\\alpha}, \\bm{\\beta} \\vert \\hat{s}_1 \\cdot \\hat{s}_2 \\vert\n \\bm{\\gamma}, \\bm{\\delta} \\rangle` of the two-particle spin operator\n :math:`\\hat{s}_1 \\cdot \\hat{s}_2`.\n\n The matrix elements are evaluated using the expression\n\n .. math::\n\n \\langle ~ (\\alpha, s_{z_\\alpha});~ (\\beta, s_{z_\\beta}) ~ \\vert \\hat{s}_1 &&\n \\cdot \\hat{s}_2 \\vert ~ (\\gamma, s_{z_\\gamma}); ~ (\\delta, s_{z_\\gamma}) ~ \\rangle =\n \\delta_{\\alpha,\\delta} \\delta_{\\beta,\\gamma} \\\\\n && \\times \\left( \\frac{1}{2} \\delta_{s_{z_\\alpha}, s_{z_\\delta}+1}\n \\delta_{s_{z_\\beta}, s_{z_\\gamma}-1} + \\frac{1}{2} \\delta_{s_{z_\\alpha}, s_{z_\\delta}-1}\n \\delta_{s_{z_\\beta}, s_{z_\\gamma}+1} + s_{z_\\alpha} s_{z_\\beta}\n \\delta_{s_{z_\\alpha}, s_{z_\\delta}} \\delta_{s_{z_\\beta}, s_{z_\\gamma}} \\right),\n\n where :math:`\\alpha` and :math:`s_{z_\\alpha}` refer to the quantum numbers of the spatial\n function and the spin projection, respectively, of the single-particle state\n :math:`\\vert \\bm{\\alpha} \\rangle \\equiv \\vert \\alpha, s_{z_\\alpha} \\rangle`.\n\n Args:\n sz (array[float]): spin-projection of the single-particle states\n\n Returns:\n array: NumPy array with the table of matrix elements. The first four columns\n contain the indices :math:`\\bm{\\alpha}`, :math:`\\bm{\\beta}`, :math:`\\bm{\\gamma}`,\n :math:`\\bm{\\delta}` and the fifth column stores the computed matrix element.\n\n **Example**\n\n >>> sz = np.array([0.5, -0.5])\n >>> print(_spin2_matrix_elements(sz))\n [[ 0. 0. 0. 0. 0.25]\n [ 0. 1. 1. 0. -0.25]\n [ 1. 0. 0. 1. -0.25]\n [ 1. 1. 1. 1. 0.25]\n [ 0. 1. 0. 1. 0.5 ]\n [ 1. 0. 1. 0. 0.5 ]]\n \"\"\"\n\n n = np.arange(sz.size)\n\n alpha = n.reshape(-1, 1, 1, 1)\n beta = n.reshape(1, -1, 1, 1)\n gamma = n.reshape(1, 1, -1, 1)\n delta = n.reshape(1, 1, 1, -1)\n\n # we only care about indices satisfying the following boolean mask\n mask = np.logical_and(alpha // 2 == delta // 2, beta // 2 == gamma // 2)\n\n # diagonal elements\n diag_mask = np.logical_and(sz[alpha] == sz[delta], sz[beta] == sz[gamma])\n diag_indices = np.argwhere(np.logical_and(mask, diag_mask))\n diag_values = (sz[alpha] * sz[beta]).flatten()\n\n diag = np.vstack([diag_indices.T, diag_values]).T\n\n # off-diagonal elements\n m1 = np.logical_and(sz[alpha] == sz[delta] + 1, sz[beta] == sz[gamma] - 1)\n m2 = np.logical_and(sz[alpha] == sz[delta] - 1, sz[beta] == sz[gamma] + 1)\n\n off_diag_mask = np.logical_and(mask, np.logical_or(m1, m2))\n off_diag_indices = np.argwhere(off_diag_mask)\n off_diag_values = np.full([len(off_diag_indices)], 0.5)\n\n off_diag = np.vstack([off_diag_indices.T, off_diag_values]).T\n\n # combine the off diagonal and diagonal tables into a single table\n return np.vstack([diag, off_diag])\n\n\ndef spin2(electrons, orbitals, mapping=\"jordan_wigner\", wires=None):\n r\"\"\"Computes the total spin operator :math:`\\hat{S}^2`.\n\n The total spin operator :math:`\\hat{S}^2` is given by\n\n .. math::\n\n \\hat{S}^2 = \\frac{3}{4}N + \\sum_{ \\bm{\\alpha}, \\bm{\\beta}, \\bm{\\gamma}, \\bm{\\delta} }\n \\langle \\bm{\\alpha}, \\bm{\\beta} \\vert \\hat{s}_1 \\cdot \\hat{s}_2\n \\vert \\bm{\\gamma}, \\bm{\\delta} \\rangle ~\n \\hat{c}_\\bm{\\alpha}^\\dagger \\hat{c}_\\bm{\\beta}^\\dagger\n \\hat{c}_\\bm{\\gamma} \\hat{c}_\\bm{\\delta},\n \n where the two-particle matrix elements are computed as,\n\n .. 
math::\n\n \\langle \\bm{\\alpha}, \\bm{\\beta} \\vert \\hat{s}_1 \\cdot \\hat{s}_2\n \\vert \\bm{\\gamma}, \\bm{\\delta} \\rangle = && \\delta_{\\alpha,\\delta} \\delta_{\\beta,\\gamma} \\\\\n && \\times \\left( \\frac{1}{2} \\delta_{s_{z_\\alpha}, s_{z_\\delta}+1}\n \\delta_{s_{z_\\beta}, s_{z_\\gamma}-1} + \\frac{1}{2} \\delta_{s_{z_\\alpha}, s_{z_\\delta}-1}\n \\delta_{s_{z_\\beta}, s_{z_\\gamma}+1} + s_{z_\\alpha} s_{z_\\beta}\n \\delta_{s_{z_\\alpha}, s_{z_\\delta}} \\delta_{s_{z_\\beta}, s_{z_\\gamma}} \\right).\n\n In the equations above :math:`N` is the number of electrons, :math:`\\alpha` refer to the\n quantum numbers of the spatial wave function and :math:`s_{z_\\alpha}` is\n the spin projection of the single-particle state\n :math:`\\vert \\bm{\\alpha} \\rangle \\equiv \\vert \\alpha, s_{z_\\alpha} \\rangle`.\n The operators :math:`\\hat{c}^\\dagger` and :math:`\\hat{c}` are the particle creation\n and annihilation operators, respectively.\n\n Args:\n electrons (int): Number of electrons. If an active space is defined, this is\n the number of active electrons.\n orbitals (int): Number of *spin* orbitals. If an active space is defined, this is\n the number of active spin-orbitals.\n mapping (str): Specifies the transformation to map the fermionic operator to the\n Pauli basis. Input values can be ``'jordan_wigner'`` or ``'bravyi_kitaev'``.\n wires (Wires, list, tuple, dict): Custom wire mapping used to convert the qubit operator\n to an observable measurable in a PennyLane ansatz.\n For types Wires/list/tuple, each item in the iterable represents a wire label\n corresponding to the qubit number equal to its index.\n For type dict, only int-keyed dict (for qubit-to-wire conversion) is accepted.\n If None, will use identity map (e.g. 0->0, 1->1, ...).\n\n Returns:\n pennylane.Hamiltonian: the total spin observable :math:`\\hat{S}^2`\n\n **Example**\n\n >>> electrons = 2\n >>> orbitals = 4\n >>> S2 = spin2(electrons, orbitals, mapping=\"jordan_wigner\")\n >>> print(S2)\n (0.75) [I0]\n + (0.375) [Z1]\n + (-0.375) [Z0 Z1]\n + (0.125) [Z0 Z2]\n + (0.375) [Z0]\n + (-0.125) [Z0 Z3]\n + (-0.125) [Z1 Z2]\n + (0.125) [Z1 Z3]\n + (0.375) [Z2]\n + (0.375) [Z3]\n + (-0.375) [Z2 Z3]\n + (0.125) [Y0 X1 Y2 X3]\n + (0.125) [Y0 Y1 X2 X3]\n + (0.125) [Y0 Y1 Y2 Y3]\n + (-0.125) [Y0 X1 X2 Y3]\n + (-0.125) [X0 Y1 Y2 X3]\n + (0.125) [X0 X1 X2 X3]\n + (0.125) [X0 X1 Y2 Y3]\n + (0.125) [X0 Y1 X2 Y3]\n\n >>> S2 = spin2(electrons, orbitals, mapping=\"jordan_wigner\", wires=['w0','w1','w2','w3'])\n >>> print(S2)\n (0.75) [Iw0]\n + (0.375) [Zw1]\n + (-0.375) [Zw0 Zw1]\n + (0.125) [Zw0 Zw2]\n + (0.375) [Zw0]\n + (-0.125) [Zw0 Zw3]\n + (-0.125) [Zw1 Zw2]\n + (0.125) [Zw1 Zw3]\n + (0.375) [Zw2]\n + (0.375) [Zw3]\n + (-0.375) [Zw2 Zw3]\n + (0.125) [Yw0 Xw1 Yw2 Xw3]\n + (0.125) [Yw0 Yw1 Xw2 Xw3]\n + (0.125) [Yw0 Yw1 Yw2 Yw3]\n + (-0.125) [Yw0 Xw1 Xw2 Yw3]\n + (-0.125) [Xw0 Yw1 Yw2 Xw3]\n + (0.125) [Xw0 Xw1 Xw2 Xw3]\n + (0.125) [Xw0 Xw1 Yw2 Yw3]\n + (0.125) [Xw0 Yw1 Xw2 Yw3]\n \"\"\"\n\n if electrons <= 0:\n raise ValueError(\n \"'electrons' must be greater than 0; got for 'electrons' {}\".format(electrons)\n )\n\n if orbitals <= 0:\n raise ValueError(\n \"'orbitals' must be greater than 0; got for 'orbitals' {}\".format(orbitals)\n )\n\n sz = np.where(np.arange(orbitals) % 2 == 0, 0.5, -0.5)\n\n table = _spin2_matrix_elements(sz)\n\n return observable(table, init_term=3 / 4 * electrons, mapping=mapping, wires=wires)\n\n\ndef observable(me_table, init_term=0, mapping=\"jordan_wigner\", wires=None):\n\n 
r\"\"\"Builds the many-body observable whose expectation value can be\n measured in PennyLane.\n\n This function can be used to build second-quantized operators in the basis\n of single-particle states (e.g., HF states) and to transform them into\n PennyLane observables. In general, single- and two-particle operators can be\n expanded in a defined active space,\n\n .. math::\n\n &&\\hat A = \\sum_{\\alpha \\leq 2n_\\mathrm{docc}} \\langle \\alpha \\vert \\hat{\\mathcal{A}}\n \\vert \\alpha \\rangle ~ \\hat{n}_\\alpha +\n \\sum_{\\alpha, \\beta ~ \\in ~ \\mathrm{active~space}} \\langle \\alpha \\vert \\hat{\\mathcal{A}}\n \\vert \\beta \\rangle ~ \\hat{c}_\\alpha^\\dagger\\hat{c}_\\beta \\\\\n &&\\hat B = \\frac{1}{2} \\left\\{ \\sum_{\\alpha, \\beta \\leq 2n_\\mathrm{docc}}\n \\langle \\alpha, \\beta \\vert \\hat{\\mathcal{B}} \\vert \\beta, \\alpha \\rangle\n ~ \\hat{n}_\\alpha \\hat{n}_\\beta + \\sum_{\\alpha, \\beta, \\gamma, \\delta ~\n \\in ~ \\mathrm{active~space}} \\langle \\alpha, \\beta \\vert \\hat{\\mathcal{B}}\n \\vert \\gamma, \\delta \\rangle ~ \\hat{c}_{\\alpha}^\\dagger \\hat{c}_{\\beta}^\\dagger\n \\hat{c}_{\\gamma} \\hat{c}_{\\delta} \\right\\}.\n\n In the latter equations :math:`n_\\mathrm{docc}` denotes the doubly-occupied orbitals,\n if any, not included in the active space and\n :math:`\\langle \\alpha \\vert \\hat{\\mathcal{A}} \\vert \\beta \\rangle` and\n :math:`\\langle \\alpha, \\beta \\vert\\hat{\\mathcal{B}} \\vert \\gamma, \\delta \\rangle`\n are the matrix elements of the one- and two-particle operators\n :math:`\\hat{\\mathcal{A}}` and :math:`\\hat{\\mathcal{B}}`, respectively.\n\n The function utilizes tools of `OpenFermion `_\n to build the second-quantized operator and map it to basis of Pauli matrices via the\n Jordan-Wigner or Bravyi-Kitaev transformation. Finally, the qubit operator is\n converted to a a PennyLane observable by the function :func:`~.convert_observable`.\n\n Args:\n me_table (array[float]): Numpy array with the table of matrix elements.\n For single-particle operators this array will have shape\n ``(me_table.shape[0], 3)`` with each row containing the indices\n :math:`\\alpha`, :math:`\\beta` and the matrix element :math:`\\langle \\alpha \\vert\n \\hat{\\mathcal{A}}\\vert \\beta \\rangle`. For two-particle operators this\n array will have shape ``(me_table.shape[0], 5)`` with each row containing\n the indices :math:`\\alpha`, :math:`\\beta`, :math:`\\gamma`, :math:`\\delta` and\n the matrix elements :math:`\\langle \\alpha, \\beta \\vert \\hat{\\mathcal{B}}\n \\vert \\gamma, \\delta \\rangle`.\n init_term: the contribution of doubly-occupied orbitals, if any, or other quantity\n required to initialize the many-body observable.\n mapping (str): specifies the fermion-to-qubit mapping. Input values can\n be ``'jordan_wigner'`` or ``'bravyi_kitaev'``.\n wires (Wires, list, tuple, dict): Custom wire mapping used to convert the qubit operator\n to an observable measurable in a PennyLane ansatz.\n For types Wires/list/tuple, each item in the iterable represents a wire label\n corresponding to the qubit number equal to its index.\n For type dict, only int-keyed dict (for qubit-to-wire conversion) is accepted.\n If None, will use identity map (e.g. 
0->0, 1->1, ...).\n\n Returns:\n pennylane.Hamiltonian: the fermionic-to-qubit transformed observable\n\n **Example**\n\n >>> table = np.array([[0.0, 0.0, 0.4], [1.0, 1.0, -0.5], [1.0, 0.0, 0.0]])\n >>> print(observable(table, init_term=1 / 4, mapping=\"bravyi_kitaev\"))\n (0.2) [I0]\n + (-0.2) [Z0]\n + (0.25) [Z0 Z1]\n >>> print(observable(table, init_term=1 / 4, mapping=\"bravyi_kitaev\", wires=['w0','w1']))\n (0.2) [Iw0]\n + (-0.2) [Zw0]\n + (0.25) [Zw0 Zw1]\n \"\"\"\n\n if mapping.strip().lower() not in (\"jordan_wigner\", \"bravyi_kitaev\"):\n raise TypeError(\n \"The '{}' transformation is not available. \\n \"\n \"Please set 'mapping' to 'jordan_wigner' or 'bravyi_kitaev'.\".format(mapping)\n )\n\n sp_op_shape = (3,)\n tp_op_shape = (5,)\n for i_table in me_table:\n if np.array(i_table).shape not in (sp_op_shape, tp_op_shape):\n raise ValueError(\n \"expected entries of 'me_table' to be of shape (3,) or (5,) ; got {}\".format(\n np.array(i_table).shape\n )\n )\n\n # Initialize the FermionOperator\n mb_obs = FermionOperator() + FermionOperator(\"\") * init_term\n\n for i in me_table:\n\n if i.shape == (5,):\n # two-particle operator\n mb_obs += FermionOperator(\n ((int(i[0]), 1), (int(i[1]), 1), (int(i[2]), 0), (int(i[3]), 0)), i[4]\n )\n elif i.shape == (3,):\n # single-particle operator\n mb_obs += FermionOperator(((int(i[0]), 1), (int(i[1]), 0)), i[2])\n\n # Map the fermionic operator to a qubit operator\n if mapping.strip().lower() == \"bravyi_kitaev\":\n return structure.convert_observable(bravyi_kitaev(mb_obs), wires=wires)\n\n return structure.convert_observable(jordan_wigner(mb_obs), wires=wires)\n\n\ndef spin_z(orbitals, mapping=\"jordan_wigner\", wires=None):\n r\"\"\"Computes the total spin projection operator :math:`\\hat{S}_z` in the Pauli basis.\n\n The total spin projection operator :math:`\\hat{S}_z` is given by\n\n .. math::\n\n \\hat{S}_z = \\sum_{\\alpha, \\beta} \\langle \\alpha \\vert \\hat{s}_z \\vert \\beta \\rangle\n ~ \\hat{c}_\\alpha^\\dagger \\hat{c}_\\beta, ~~ \\langle \\alpha \\vert \\hat{s}_z\n \\vert \\beta \\rangle = s_{z_\\alpha} \\delta_{\\alpha,\\beta},\n\n where :math:`s_{z_\\alpha} = \\pm 1/2` is the spin-projection of the single-particle state\n :math:`\\vert \\alpha \\rangle`. The operators :math:`\\hat{c}^\\dagger` and :math:`\\hat{c}`\n are the particle creation and annihilation operators, respectively.\n\n Args:\n orbitals (str): Number of *spin* orbitals. If an active space is defined, this is\n the number of active spin-orbitals.\n mapping (str): Specifies the transformation to map the fermionic operator to the\n Pauli basis. Input values can be ``'jordan_wigner'`` or ``'bravyi_kitaev'``.\n wires (Wires, list, tuple, dict): Custom wire mapping used to convert the qubit operator\n to an observable measurable in a PennyLane ansatz.\n For types Wires/list/tuple, each item in the iterable represents a wire label\n corresponding to the qubit number equal to its index.\n For type dict, only int-keyed dict (for qubit-to-wire conversion) is accepted.\n If None, will use identity map (e.g. 
0->0, 1->1, ...).\n\n Returns:\n pennylane.Hamiltonian: the total spin projection observable :math:`\\hat{S}_z`\n\n **Example**\n\n >>> orbitals = 4\n >>> Sz = spin_z(orbitals, mapping=\"jordan_wigner\")\n >>> print(Sz)\n (-0.25) [Z0]\n + (0.25) [Z1]\n + (-0.25) [Z2]\n + (0.25) [Z3]\n \"\"\"\n\n if orbitals <= 0:\n raise ValueError(\n \"'orbitals' must be greater than 0; got for 'orbitals' {}\".format(orbitals)\n )\n\n r = np.arange(orbitals)\n sz_orb = np.where(np.arange(orbitals) % 2 == 0, 0.5, -0.5)\n table = np.vstack([r, r, sz_orb]).T\n\n return observable(table, mapping=mapping, wires=wires)\n\n\ndef particle_number(orbitals, mapping=\"jordan_wigner\", wires=None):\n r\"\"\"Computes the particle number operator :math:`\\hat{N}=\\sum_\\alpha \\hat{n}_\\alpha`\n in the Pauli basis.\n\n The particle number operator is given by\n\n .. math::\n\n \\hat{N} = \\sum_\\alpha \\hat{c}_\\alpha^\\dagger \\hat{c}_\\alpha,\n\n where the index :math:`\\alpha` runs over the basis of single-particle states\n :math:`\\vert \\alpha \\rangle`, and the operators :math:`\\hat{c}^\\dagger` and :math:`\\hat{c}` are\n the particle creation and annihilation operators, respectively.\n\n Args:\n orbitals (int): Number of *spin* orbitals. If an active space is defined, this is\n the number of active spin-orbitals.\n mapping (str): Specifies the transformation to map the fermionic operator to the\n Pauli basis. Input values can be ``'jordan_wigner'`` or ``'bravyi_kitaev'``.\n wires (Wires, list, tuple, dict): Custom wire mapping used to convert the qubit operator\n to an observable measurable in a PennyLane ansatz.\n For types Wires/list/tuple, each item in the iterable represents a wire label\n corresponding to the qubit number equal to its index.\n For type dict, only int-keyed dict (for qubit-to-wire conversion) is accepted.\n If None, will use identity map (e.g. 
0->0, 1->1, ...).\n\n    Returns:\n        pennylane.Hamiltonian: the fermionic-to-qubit transformed observable\n\n    **Example**\n\n    >>> orbitals = 4\n    >>> N = particle_number(orbitals, mapping=\"jordan_wigner\")\n    >>> print(N)\n    (2.0) [I0]\n    + (-0.5) [Z0]\n    + (-0.5) [Z1]\n    + (-0.5) [Z2]\n    + (-0.5) [Z3]\n    >>> N = particle_number(orbitals, mapping=\"jordan_wigner\", wires=['w0','w1','w2','w3'])\n    >>> print(N)\n    (2.0) [Iw0]\n    + (-0.5) [Zw0]\n    + (-0.5) [Zw1]\n    + (-0.5) [Zw2]\n    + (-0.5) [Zw3]\n    \"\"\"\n\n    if orbitals <= 0:\n        raise ValueError(\n            \"'orbitals' must be greater than 0; got for 'orbitals' {}\".format(orbitals)\n        )\n\n    r = np.arange(orbitals)\n    table = np.vstack([r, r, np.ones([orbitals])]).T\n\n    return observable(table, mapping=mapping, wires=wires)\n\n\n__all__ = [\n    \"observable\",\n    \"particle_number\",\n    \"spin_z\",\n    \"spin2\",\n    \"_spin2_matrix_elements\",\n]\n","sub_path":"qchem/pennylane_qchem/qchem/obs.py","file_name":"obs.py","file_ext":"py","file_size_in_byte":18011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"150233729","text":"t = int(input().strip())\nfor a0 in range(t):\n    n,k = input().strip().split(' ')\n    n,k = [int(n),int(k)]\n    num = int(input())\n    \n    maxim=0\n    for i in range(0,len(str(num))):\n    \n        prod=1\n        t=str(num)[i:k+i]\n        if len(t)==k:\n            for u in t:\n                prod=prod*int(u)\n            if maxim Dict[str, Any]:\n    event_dose = np.zeros(len(patient.r))\n    if not sum(hits):\n        output[const.OUTPUT_KEY_DOSE_MAP] += event_dose\n        return output\n\n    # logger.debug(\"Calculating back scatter correction factor\")\n    k_bs = back_scatter_interpolation[event](np.sqrt(field_area))\n\n    # logger.debug(\"Calculating reference point medium correction (air -> water)\")\n    k_med = calculate_k_med(\n        data_norm=normalized_data, field_area=field_area, event=event\n    )\n\n    output[const.OUTPUT_KEY_CORRECTION_BACK_SCATTER][event] = k_bs\n    output[const.OUTPUT_KEY_CORRECTION_MEDIUM][event] = k_med\n    output[const.OUTPUT_KEY_CORRECTION_TABLE][event] = k_tab[event]\n\n    # logger.debug(\"Calculating event skin dose by applying each correction factor to the reference point air kerma\")\n    event_dose[hits] += normalized_data.K_IRP[event]\n    event_dose[hits] *= output[const.OUTPUT_KEY_CORRECTION_INVERSE_SQUARE_LAW][event]\n    event_dose[hits] *= k_med\n    event_dose[hits] *= k_bs\n\n    temp = np.ones(len(table_hits))\n    temp[table_hits] = k_tab[event]\n    event_dose[hits] *= temp\n\n    output[const.OUTPUT_KEY_DOSE_MAP] += event_dose\n\n    return output\n","sub_path":"src/pyskindose/calculate_dose/add_correction_and_event_dose_to_output.py","file_name":"add_correction_and_event_dose_to_output.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"40851266","text":"\nimport random\nfrom random import randint\nimport threading\nfrom exchangeService.gen import currencyExchange_pb2 as c\n\nlock = threading.Lock()\n\nexchange = {c.USD: {c.PLN: 3.56, c.EUR: 0.83},\n            c.PLN: {c.USD: 0.28, c.EUR: 0.23},\n            c.EUR: {c.USD: 1.20, c.PLN: 4.27}}\n\n\ndef convert(from_curr, to_curr):\n\n    with lock:\n        random_change_value = random.uniform(0, 0.4)\n\n        result = exchange[from_curr][to_curr]\n\n        if randint(0, 10) % 4 == 0:\n            result = result + random_change_value\n            exchange[from_curr][to_curr] = result\n\n        return round(result, 4)\n","sub_path":"zad.4/bin/exchangeService/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"303132273","text":"#!/usr/bin/env python3\n# update_produce.py - Corrects costs in produceSales.xlsx spreadsheet.\n\n# NOTE: This program assumes that \"produceSales.xlsx\" file is in the same directory.\n# Download it from \"http://nostarch.com/automatestuff\".\n\nimport openpyxl\n\nwb = openpyxl.load_workbook('produceSales.xlsx')\nsheet = wb['Sheet']\n\n# The produce types and their updated prices.\nPRICE_UPDATES = {'Garlic': 3.07,\n\t\t\t\t\t 'Celery': 1.19,\n\t\t\t\t\t 'Lemon': 1.27}\n\t\t\t\t\t \n# Loop through the rows and update the prices (max_row is the last row, and range() excludes its stop value, hence the +1).\nfor row_num in range(2, sheet.max_row + 1):\n\tproduce_name = sheet.cell(row=row_num, column=1).value\n\tif produce_name in PRICE_UPDATES:\n\t\tsheet.cell(row=row_num, column=2).value = PRICE_UPDATES[produce_name]\n\t\t\nwb.save('updated_produce_sales.xlsx')\n","sub_path":"Books/Python/Automate the Boring Stuff With Python - Al Sweigart/walkthroughs/update_produce.py","file_name":"update_produce.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"570703440","text":"# Imports from 3rd party libraries\r\nimport dash\r\nimport dash_bootstrap_components as dbc\r\nimport dash_core_components as dcc\r\nimport dash_html_components as html\r\nfrom dash.dependencies import Input, Output\r\nimport plotly.express as px\r\n\r\n# Imports from this application\r\nfrom app import create_app\r\n\r\n# 2 column layout. 1st column width = 4/12\r\n# https://dash-bootstrap-components.opensource.faculty.ai/l/components/layout\r\ncolumn1 = dbc.Col(\r\n    [\r\n        dcc.Markdown(\r\n            \"\"\"\r\n        \r\n            ## Song suggestion app based on a user's song input\r\n\r\n            This app will allow users to examine the audio features of over 160k songs collected from Spotify.\r\n\r\n            \"\"\"\r\n        ),\r\n        dcc.Link(dbc.Button('Predict!', color='primary'), href='/predictions')\r\n    ],\r\n    md=4,\r\n)\r\n\r\ngapminder = px.data.gapminder()\r\nfig = px.scatter(gapminder.query(\"year==2007\"), x=\"gdpPercap\", y=\"lifeExp\", size=\"pop\", color=\"continent\",\r\n           hover_name=\"country\", log_x=True, size_max=60)\r\n\r\ncolumn2 = dbc.Col(\r\n    [\r\n        dcc.Graph(figure=fig),\r\n    ]\r\n)\r\n\r\nlayout = dbc.Row([column1, column2])","sub_path":"pages/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"19383003","text":"#!/bin/env python\n# \n# Copyright 2010 bit.ly\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom errors import DataError\nfrom pool import ConnectionPools\nfrom cursor import Cursor\n\nclass Client(object):\n    \"\"\"\n    Client connection to represent a remote database.\n    \n    Internally Client maintains a pool of connections that will live beyond the life of this object.\n    \n    Usage:\n        db = asyncmongo.Client(host, port, dbname).connection(collectionname)\n        db.find({...}, callback=...)\n    \n    \"\"\"\n    def __init__(self, pool_id=None, **kwargs):\n        self._pool = ConnectionPools.get_connection_pool(pool_id, **kwargs)\n    \n    def __getattr__(self, name):\n        \"\"\"Get a collection by name.\n\n        :Parameters:\n            - `name`: the name of the collection\n        \"\"\"\n        return self.connection(name)\n\n    def __getitem__(self, name):\n        \"\"\"Get a collection by name.\n        :Parameters:\n            - `name`: the name of the collection to get\n        \"\"\"\n        return self.connection(name)\n    \n    def connection(self, collectionname, dbname=None):\n        \"\"\"Get a cursor to a collection by name.\n\n        raises `DataError` on names with unallowable characters.\n\n        :Parameters:\n            - `collectionname`: the name of the collection\n            - `dbname`: (optional) override the default db for a connection\n        \n        \"\"\"\n        if not collectionname or \"..\" in collectionname:\n            raise DataError(\"collection names cannot be empty\")\n        if \"$\" in collectionname and not (collectionname.startswith(\"oplog.$main\") or\n                                          collectionname.startswith(\"$cmd\")):\n            raise DataError(\"collection names must not \"\n                            \"contain '$': %r\" % collectionname)\n        if collectionname.startswith(\".\") or collectionname.endswith(\".\"):\n            raise DataError(\"collection names must not start \"\n                            \"or end with '.': %r\" % collectionname)\n        if \"\\x00\" in collectionname:\n            raise DataError(\"collection names must not contain the \"\n                            \"null character\")\n        return Cursor(dbname or self._pool._dbname, collectionname, self._pool)\n","sub_path":"asyncmongo/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"483857439","text":"#!venv/bin/python\r\n# -*- coding: utf8 -*-\r\n\r\nimport iscpy\r\n\r\nwith open('named.conf') as f:\r\n    conf = iscpy.ParseISCString(f.read())\r\n\r\ndefault_values = {'allow-transfer': {'213.186.3.120': True},\r\n                  'file': '\"/etc/bind/zones/db.home4.fr\"',\r\n                  'type': 'master'}\r\n\r\nok = False\r\nif conf.has_key('zone \"home4.fr\"'):\r\n    ok = True\r\n    for key, value in default_values.iteritems():\r\n        if not (conf['zone \"home4.fr\"'].has_key(key) and conf['zone \"home4.fr\"'][key] == value):\r\n            ok = False ; break\r\n\r\nprint('configuration file is %s' % ('correct' if ok else 'incorrect'))","sub_path":"MyPython/02mydhcp/manage_named.py","file_name":"manage_named.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"407578985","text":"with open('../input/day2.txt') as f:\n    lines = f.readlines()\n    times2 = 0\n    times3 = 0\n    for line in lines:\n        occurences = {}\n        for c in line:\n            if c in occurences:\n                occurences[c] += 1\n            else:\n                occurences[c] = 1\n        for c, n in occurences.items():\n            if n == 2:\n                times2 += 1\n                break\n        for c, n in occurences.items():\n            if n == 3:\n                times3 += 1\n                break\n    print(times2 * times3)\n    ","sub_path":"day2/day2-1.py","file_name":"day2-1.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"118210059","text":"message = '''曲: 周杰倫\nQu: Zhou Jie Lun\nLyrics: Jay Chou'''\n\nprint(\"The full message is:\\n\",message)\nprint(\"The number of lines = \", message.count(\"\\n\") + 1) #Good for prioritizing which Mandarin characters to memorize for karaoke.\n\nindex1 = message.find(\"\\n\", 1)\nindex2 = message.find(\"\\n\", index1 + 1) #Last arg in find method is the character position at which to begin the search.\nindex3 = message.find(\"\\n\", index2 + 1)\nprint(\"Index of 1st newline char (Start with 0. Count any invisible space char at end of line) = \", message.find(\"\\n\")) #Maybe can use this to find where Chinese characters stop and pinyin starts.\nprint(\"index2 = \", index2)\nprint(\"index3 = \", index3)\nline1 = message[:index1]\nline2 = message[index1+1:index2]\nline3 = message[index2+1:]\nprint(\"line1 = \", line1)\nprint(\"line2 = \", line2)\nprint(\"line3 = \", line3)\n\n# Start with this value.\nprint(\"\\nUsing a while loop, find the same indices:\")\nlocation = -1\nwhile True:\n    location = message.find(\"\\n\", location + 1) # Find next occurrence by starting search at character +1 after previous occurrence.\n    if location == -1: break # Break if not found.\n    print(location)\n\npython_list = message.split('\\n')\nprint(\"python_list = message.split('\\\\n') = \", python_list)\npython_list_joined = '\\n'.join(python_list)\nprint(\"python_list_joined = '\\\\n'.join(python_list) = \", python_list_joined)\n\nmessage = '''曲: 周杰倫\nQu: Zhou Jie Lun\nLyrics: Jay Chou\n\n久未放晴的天空 依舊留著你的笑容\njiu wei fang qing de tian kong / yi jiu liu zhe ni de xiao rong\nThe sky which has long not been sunny still keeps your smile as before\n\n哭過 卻無法掩埋歉疚\nku guo / que wu fa yan mai qian jiu\nHave cried, but been unable to bury [my] guilt\n\n風箏在陰天擱淺 想念還在等待救援\nFeng zheng zai yin tian ge qian / xiang nian hai zai deng dai jiu yuan\nThe kite stranded in the gloomy sky, [my] longing is still awaiting to be rescued\n\n我拉著線 複習你給的溫柔\nwo la zhe xian / fu xi ni gei de wen rou\nI'm pulling the kite string and reviewing the tenderness you gave\n\n曝晒在一旁的寂寞\npu shai zai yi pang de ji mo \nThe loneliness that has been isolated on the side\n\n笑我給不起承諾\nxiao wo gei bu qi cheng nuo\nLaughing at the promises that I can't afford to give\n\n怎麼會怎麼會 你竟原諒了我\nzen me hui zen me hui ni jing yuan liang le wo\nHow come, how come, you've actually forgiven me\n\n我只能永遠讀著對白 讀著我給你的傷害\nwo zhi neng yong yuan du zhe dui bai / du zhe wo gei ni de shang hai\nI can only forever read the dialogue, reading the pain that I've given you\n\n我原諒不了我 就請你當作我已不在\nwo yuan liang bu liao wo / jiu qing ni dang zuo wo yi bu zai\nI cannot forgive myself, so please treat as if I'm not here anymore\n\n我睜開雙眼看著空白 忘記你對我的期待\nwo zheng kai shuang yan kan zhe kong bai / wang ji ni dui wo de qi dai\nI looked on blankly with eyes wide open, [trying] to forget the expectations you had of me\n\n讀完了依賴 我很快就離開\ndu wan le yi lai / wo hen kuai jiu li kai\nAfter finish reading [my] dependence [on you], I'll leave very soon '''\n\nprint(\"The number of lines is \", message.count(\"\\n\") + 1) #Good for prioritizing which Mandarin characters to memorize for karaoke.\n\nindex1 = message.find(\"\\n\", 1)\nindex2 = message.find(\"\\n\", index1 + 1) #Last arg in find method is the character position at which to begin the search.\nindex3 = message.find(\"\\n\", index2 + 1)\nindex4 = message.find(\"\\n\", index3 + 1)\nindex5 = message.find(\"\\n\", index4 + 1)\nindex6 = message.find(\"\\n\", index5 + 1)\nindex7 = message.find(\"\\n\", index6 + 1)\nprint(\"Index of 1st newline char (counting any invisible space characters at end of line) is at char \", message.find(\"\\n\")) #Maybe can use this to find where Chinese characters stop and pinyin starts.\nprint(\"index2 = \", index2)\nprint(\"index3 = \", index3)\nprint(\"index4 = \", index4)\nprint(\"index5 = \", index5)\nprint(\"index6 = \", index6)\nprint(\"index7 = \", index7)\nline1 = message[:index1]\nline2 = message[index1+1:index2]\nline3 = message[index2+1:index3]\nline4 = message[index3+1:index4]\nline5 = message[index4+1:index5]\nline6 = message[index5+1:index6]\nline7 = message[index6+1:]\nprint(\"line1 = \", line1)\nprint(\"line2 = \", line2)\nprint(\"line3 = \", line3)\nprint(\"line4 = \", line4)\nprint(\"line5 = \", line5)\nprint(\"line6 = \", line6)\n#print(\"line7 until end = \", line7)\n\npython_list = message.split('\\n')\nprint(\"python_list = message.split('\\\\n') = \", python_list)\n\nsong_lines = python_list\nchar_lines = []\npinyin_lines = []\nenglish_lines = []\n\n# I want 'n' for each 'n' in nums if 'n' is even\nmy_list = []\ni = 0\nfor line in song_lines:\n\tprint(\"\\nline = \", line)\n\tif i%4 == 0:\n\t\tprint('Chinese Characters')\n\t\tchar_lines.append(line)\n\telif i%4 == 1:\n\t\tprint('Pinyin')\n\t\tpinyin_lines.append(line)\n\telif i%4 == 2:\n\t\tprint('English')\n\t\tenglish_lines.append(line)\n\telse:\n\t\tprint('Blank')\n\ti = i + 1\n\nprint(\"\\nchar_lines = \", char_lines)\nprint(\"\\npinyin_lines = \", pinyin_lines)\nprint(\"\\nenglish_lines = \", english_lines)\n\n\n# my_list = [n for n in nums if n%2 == 0]\n# print(\"my_list = [n for n in nums if n%2 == 0] = \", my_list)\n\n","sub_path":"divBlockMandarinLyricsAlternateLinesCharactersPinyinRL.py","file_name":"divBlockMandarinLyricsAlternateLinesCharactersPinyinRL.py","file_ext":"py","file_size_in_byte":5180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"261063070","text":"from pyecharts import WordCloud\nimport pandas as pd\n\npost_data = pd.read_csv(r'C:\\data\\post_data.csv')\n\npost_data2 = post_data.groupby(by=['category']).agg({'views': sum}).reset_index()\nprint(post_data2)\n\nwordcloud = WordCloud(width=1300, height=620)\nwordcloud.add(\"\",\n              post_data2['category'],\n              post_data2['views'],\n              word_size_range=[20, 100]\n              )\nwordcloud.render(\"wordcloud.html\")\n","sub_path":"py10_可视化/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"246444979","text":"from OpenGL.GL import *\nfrom OpenGL.GLU import *\nimport random\nimport pygame\nimport os\nfrom pygame.locals import *\nfrom coordenadas import *\nfrom percorre import Cubo, Cadeira, Diamante\n\nos.environ[\"SDL_VIDEO_CENTERED\"]='1'\n\n\ndef random_color():\n    x = random.randint(0, 255) / 255\n    y = random.randint(0, 255) / 255\n    z = random.randint(0, 255) / 255\n    color = (x, y, z)\n    return color\n\ncolors_list= []\n\nfor n in range(len(chair_faces)):\n    colors_list.append(random_color())\n\n\ndef main():\n    pygame.init()\n    display = (900, 900)\n    pygame.display.set_mode(display, DOUBLEBUF|OPENGL)\n    gluPerspective(50, 1, 0.1, 50.0)\n    glTranslatef(0.0, 0.0, -30)# X, Y, DEPTH(Z)\n    glRotatef(-90, 100, 100, 100)# Angle, X, Y, Z.\n    \n\nmain()\n# THIS PART HANDLES THE ROTATION\nrun = True\nwhile 
run:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n glRotatef(1, 3, -10, -45)\n glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)\n Cadeira()\n pygame.display.flip()\n pygame.time.wait(10)\npygame.quit()\nquit()\n","sub_path":"Python/PyOpenGL/Atividade programação gráfica/Inicializador de coordenada/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"565036081","text":"# Suppose we're given an array of numbers like the following:\n\n# [4, 2, 4]\n\n# Could you find the majority element? A majority is defined as \"the greater part,\n# or more than half, of the total. It is a subset of a set consisting of more than half of the set's elements.\"\n\n# Let's assume that the array length is always at least one, and that there's always a majority element.\n\n# In the example above, the majority element would be 4.\n\nimport math\n\ndef find_majority(a):\n print(a)\n majority_threshold = math.floor(len(a) / 2)\n print('threshold', majority_threshold)\n counter = {}\n\n\n for i in range(0, len(a)):\n print(a[i])\n\n temp = counter.get(a[i], 0)\n temp += 1\n counter.update({a[i]: temp})\n\n for key, value in counter.items():\n if value > majority_threshold:\n print('majority:', key)\n return key\n return 'no majority'\n\n\n print(counter)\n\n# another way that is faster\n# sort the array and find the middle\n# if 0 to middle +1 are equal, then\n# it's the majority\n# [5,3,5,4,22,5,5]\n# sorted [3,4,5,5,5,5,22]\n\n\n\nresult = find_majority([4, 2, 4, 2, 1, 1, 1, 1, 1])\n1,1,1,1,2,2\nprint(result)\nassert result == 1\n","sub_path":"algodaily/majority_elements.py","file_name":"majority_elements.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"357655754","text":"from pal.model.access_mechanism import AbstractAccessMechanism\nfrom dataclasses import dataclass\n\n@dataclass()\nclass MRSRegister(AbstractAccessMechanism):\n \"\"\" Access mechanism for reading the contents of a system register into \"\"\"\n \"\"\" a general purpose register via the MRS instruciton \"\"\"\n\n op0: bytes = 0\n \"\"\" Top-level encoding of the system instruction type \"\"\"\n\n op1: bytes = 0\n \"\"\" The lowest exception level at which the access is possible \"\"\"\n\n op2: bytes = 0\n \"\"\" Sub-encoding for the system instruction type \"\"\"\n\n crn: bytes = 0\n \"\"\" Register number \"\"\"\n\n crm: bytes = 0\n \"\"\" Sub-register number \"\"\"\n\n operand_mnemonic: str = \"\"\n \"\"\" The operand mnemonic of the register to be accessed \"\"\"\n\n rt: bytes = 0b0\n \"\"\" Destination general purpose register (default = x0) \"\"\"\n\n name: str = \"mrs_register\"\n \"\"\" The name of this access mechanism \"\"\"\n\n def is_read(self):\n return True\n\n def is_write(self):\n return False\n\n def is_memory_mapped(self):\n return False\n\n def is_valid(self):\n if self.op0 > 0b11: return False\n if self.op1 > 0b111: return False\n if self.op2 > 0b111: return False\n if self.crn > 0b1111: return False\n if self.crm > 0b1111: return False\n if self.rt > 0b11111: return False\n\n return True\n\n def binary_encoded(self):\n encoding = 0b0\n header = 0b1101010100 # System register transfer instruction\n l = 0b1 # Direction of transfer (1 = read, 0 = write)\n\n encoding |= header << 22\n encoding |= l << 21\n encoding |= (self.op0 & 0b11) << 19\n encoding |= (self.op1 & 0b111) 
<< 16\n encoding |= (self.crn & 0b1111) << 12\n encoding |= (self.crm & 0b1111) << 8\n encoding |= (self.op2 & 0b111) << 5\n encoding |= (self.rt & 0b11111)\n\n return encoding\n\n def __str__(self):\n msg = super().__str__()\n msg += \"\\n\"\n msg += \"\\tAssembler Mnemonic: {instruction} x{rt}, {operand};\\n\"\n msg = msg.format(\n instruction=\"MRS\",\n operand=self.operand_mnemonic,\n rt=self.rt\n )\n\n msg += \"\\tInstruction Encoding:\\n\"\n encoding = self.binary_encoded()\n table = \"\\t|-----------------------------|--|-----|--------|-----------|-----------|--------|-------------|\\n\"\n table += \"\\t| S3 |L | op0 | op1 | CRn | CRm | op2 | Rt |\\n\"\n table += \"\\t|31 30 29 28 27 26 25 24 23 22|21|20 19|18 17 16|15 14 13 12|11 10 9 8 |7 6 5 |4 3 2 1 0|\\n\"\n table += \"\\t|-----------------------------|--|-----|--------|-----------|-----------|--------|-------------|\\n\"\n table += \"\\t|\"\n for i in range(0, 32):\n table += str((encoding >> 31 - i) & 0b1)\n if i in [9, 10, 12, 15, 19, 23, 26]:\n table += \" |\";\n elif i == 31:\n table += \"|\";\n else:\n table += \" \";\n table += \"\\n\"\n table += \"\\t|-----------------------------|--|-----|--------|-----------|-----------|--------|-------------|\\n\"\n\n msg += table\n return msg\n","sub_path":"pal/model/armv8a/access_mechanism/mrs_register.py","file_name":"mrs_register.py","file_ext":"py","file_size_in_byte":3227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"69819502","text":"import seiscomp3.Kernel, sys, os, subprocess\n\nfile_path = '/home/alireza/seiscomp3/sckiwi/Data_Provider'\n\n\nMain_Sc = file_path.split ('/')[-1]\n\nclass Module (seiscomp3.Kernel.Module):\n\tdef __init__ (self, env):\n\t\tseiscomp3.Kernel.Module.__init__(self, env, env.moduleName (__file__))\t\n\t\t\n\tdef _run (self):\t\n\t\tif os.path.exists(file_path):\n\t\t\tparams = {}\n\t\t\tparams = self.env.lockFile (self.name)\n\t\t\tparams += \" \" + \"bash %s\" % file_path\n\t\t\tself.env.start (self.name, \"run_with_lock\", params, True)\t\t\n\t\telse:\n\t\t\tmessage = 'The main script < %s > is missing. 
check SEISCOMP_HOME/sckiwi directory' % (Main_Sc)\n\t\t\tprint (message)\t\n\t\n","sub_path":"sckiwi_init/sckiwi_sc.py","file_name":"sckiwi_sc.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"208931545","text":"from django.shortcuts import render\nposts = [\n {\n'author':'Devesh ',\n'title':'Deblogg post 1',\n'content':'first post content',\n'date_posted': 'september 10, 2019'\n},\n {'author':'om ',\n'title':'Deblogg post 2',\n'content':'second post content',\n'date_posted': 'september 11, 2019'\n}\n\n]\n\n\ndef home(request):\n context={'posts':posts,\n 'title':'Home P'\n }\n return render(request,'Deblogg/home.html',context)\ndef about(request):\n context={'title':'about page'}\n return render(request,'Deblogg/about.html',context)","sub_path":"Deblogg/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"489297586","text":"from PyQt5 import *\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtMultimedia import *\nfrom pyqtgraph import PlotWidget, plot\nimport pyqtgraph as pg\nimport numpy as np\nimport sys \n\nOFFSET = 10\nSCALE_HEIGHT = 224\n\nclass Thermometer(QtWidgets.QWidget):\n def __init__(self, parent):\n super(Thermometer, self).__init__()\n self.value = 35\n\n def changeValue(self, val):\n self.value = val\n self.paintEvent(self)\n \n\n def paintEvent(self, event):\n painter = QtGui.QPainter()\n painter.begin(self)\n self.initDrawing(painter)\n self.drawTemperature(painter)\n self.drawBackground(painter)\n painter.end()\n\n def initDrawing(self, painter):\n self.normal = 35.0\n self.critical = 38.0\n self.m_min = 30.0\n self.m_max = 40.0\n painter.setRenderHint(QtGui.QPainter.Antialiasing)\n painter.translate(self.width()/2.0, 0.0)\n painter.scale(self.height()/300.0, self.height()/300.0)\n\n def drawBackground(self, painter):\n path = QtGui.QPainterPath()\n path.moveTo(-7.5, 257.0)\n path.quadTo(-12.5, 263.0, -12.5, 267.5)\n path.quadTo(-12.5, 278.0, 0.0, 280.0)\n path.quadTo(12.5, 278.0, 12.5, 267.5)\n path.moveTo(12.5, 267.5)\n path.quadTo(12.5, 263.0, 7.5, 257.0)\n path.lineTo(7.5, 25.0)\n path.quadTo(7.5, 12.5, 0, 12.5)\n path.quadTo(-7.5, 12.5, -7.5, 25.0)\n path.lineTo(-7.5, 257.0)\n p1 = QtCore.QPointF(-2.0, 0.0)\n p2 = QtCore.QPointF(12.5, 0.0)\n linearGrad = QtGui.QLinearGradient(p1, p2)\n linearGrad.setSpread(QtGui.QGradient.ReflectSpread)\n linearGrad.setColorAt(1.0, QtGui.QColor(0, 150, 255, 170))\n linearGrad.setColorAt(0.0, QtGui.QColor(255, 255, 255, 0))\n painter.setBrush(QtGui.QBrush(linearGrad))\n painter.setPen(QtCore.Qt.black)\n painter.drawPath(path)\n pen = QtGui.QPen()\n length = 12\n for i in range(33):\n pen.setWidthF(1.0)\n length = 12\n if i % 4 != 0:\n length = 8\n pen.setWidthF(0.8)\n if i % 2 != 0:\n length = 5\n pen.setWidthF(0.6)\n painter.setPen(pen)\n painter.drawLine(-7, 28+i*7, -7+length, 28+i*7)\n for i in range(9):\n num = self.m_min + i*(self.m_max-self.m_min)/8.0\n val = \"{0}\".format(num)\n fm = painter.fontMetrics()\n size = fm.size(QtCore.Qt.TextSingleLine, val)\n point = QtCore.QPointF(OFFSET, 252-i*28+size.width()/4.0)\n painter.drawText(point, val)\n\n def drawTemperature(self, painter):\n if self.value >= self.critical:\n color = QtGui.QColor(255, 0, 0)\n elif self.value >= self.normal:\n color = QtGui.QColor(0, 200, 
0)\n else:\n color = QtGui.QColor(0, 0, 255)\n scale = QtGui.QLinearGradient(0.0, 0.0, 5.0, 0.0)\n bulb = QtGui.QRadialGradient(0.0, 267.0, 10.0, -5.0, 262.0)\n scale.setSpread(QtGui.QGradient.ReflectSpread)\n bulb.setSpread(QtGui.QGradient.ReflectSpread)\n color.setHsv(color.hue(), color.saturation(), color.value())\n scale.setColorAt(1.0, color)\n bulb.setColorAt(1.0, color)\n color.setHsv(color.hue(), color.saturation() - 200, color.value())\n scale.setColorAt(0.0, color)\n bulb.setColorAt(0.0, color)\n factor = self.value - self.m_min\n factor = (factor/(self.m_max-self.m_min))\n temp = SCALE_HEIGHT * factor\n height = temp + OFFSET\n painter.setPen(QtCore.Qt.NoPen)\n painter.setBrush(scale)\n painter.drawRect(-5, 252+OFFSET-height, 10, height)\n painter.setBrush(bulb)\n rect = QtCore.QRectF(-10.0, 258, 20.0, 20.0)\n painter.drawEllipse(rect)\n\n\nclass TemperatureModule():\n def __init__(self):\n app = QApplication([]) # PyQT application starts\n window = QWidget() # create a window\n\n # left group box, body temperature label\n leftGroupBox = QGroupBox('Body Temperature Label') #label\n layout1 = QVBoxLayout()\n thermometer = Thermometer(layout1)\n layout1.addWidget(thermometer)\n leftGroupBox.setLayout(layout1)\n \n # right group box, body temperature graph\n rightGroupBox = QGroupBox('Body Temperature Graph')\n layout2 = QVBoxLayout() # create a box\n tempGraph = BodyTemperature(thermometer) # instantiate BodyTemperature Class\n \n layout2.addWidget(tempGraph.label)\n layout2.addWidget(tempGraph.graphWidget) # add graphwidget into a box\n rightGroupBox.setLayout(layout2)\n\n\n # GSR Label\n gsrLabel = QGroupBox('GSR Label') #label\n gsrLabel.setLayout(QVBoxLayout())\n\n # GSR graph\n gsrGraph = QGroupBox('GSR Graph')\n gsr = GSR()\n layout3 = QVBoxLayout()\n layout3.addWidget(gsr.graphWidget)\n gsrGraph.setLayout(layout3) \n\n\n mainLayout = QGridLayout()\n mainLayout.addWidget(leftGroupBox, 0, 0)\n mainLayout.addWidget(rightGroupBox, 0, 1)\n\n mainLayout.addWidget(gsrLabel, 1, 0)\n mainLayout.addWidget(gsrGraph, 1, 1)\n\n darkMode()\n\n window.setLayout(mainLayout) # set layout inside a window\n window.show() # show window\n app.exec_()\n\n\nclass BodyTemperature():\n def __init__(self, Thermometer):\n pg.setConfigOption('background', 'w') #graph background color\n pg.setConfigOption('foreground', 'k') #graph foreground color\n\n\n #thermometer\n self.thermometer = Thermometer\n \n self.graphWidget = pg.PlotWidget(title='Body Temperature') #pyqtgraph PlotWidget Class\n \n self.graphWidget.setLabel('left', \"Temerature\", units='Celsius') # left label\n self.graphWidget.setLabel('bottom', \"Time\", units='Mili Seconds') # bottom label\n\n \n # Get initial data\n self.seconds = [-80] # seconds data array, x value\n self.temperature = [self.getBodyTemp()] # temperature data array, y value\n\n self.graphWidget.plot(self.seconds, self.temperature, clear=True) # plot initial value\n self.graphWidget.setRange(yRange=(34.0, 41.0)) # change the visible x range of the graph\n\n self.label = QtGui.QLabel()\n #self.label.setText('Temperature is normal')\n #self.label.setStyleSheet('color: green')\n \n #Timer Setup, every second update the data\n self.timer = pg.QtCore.QTimer()\n self.timer.timeout.connect(self.update)\n self.timer.start(20)\n \n def getBodyTemp(self):\n return np.random.uniform(36.0, 36.3)\n\n def update(self):\n temp = 0 # initialize \n if len(self.seconds) < 10: # first ten seconds\n self.seconds.append(self.seconds[len(self.seconds) - 1] + 20)\n temp = self.getBodyTemp()\n 
self.temperature.append(temp)\n \n else: # after ten seconds\n #self.seconds.pop(0) \n #self.seconds.append(self.seconds[8]+1) #updating the seconds\n self.temperature.pop(0)\n temp = self.getBodyTemp()\n self.temperature.append(temp) #updating the temperature\n self.graphWidget.setRange(xRange=(self.seconds[0], self.seconds[9])) #change the visible x range of the graph\n\n if temp >= 38.0:\n self.graphWidget.plot(self.seconds, self.temperature, pen='r', clear=True) # if temperature is high, set line color red\n self.label.setText('Body Temperature is too high')\n self.label.setStyleSheet('color: red')\n elif temp >= 35.0 and temp < 38.0:\n self.graphWidget.plot(self.seconds, self.temperature, pen='g', clear=True) # if temperature is normal, set line color green\n self.label.setText('Body Temperature is normal')\n self.label.setStyleSheet('color: green')\n else:\n self.graphWidget.plot(self.seconds, self.temperature, pen='b', clear=True) # if temperatre is too low, set line color blue\n self.label.setText('Body Temperature is too low')\n self.label.setStyleSheet('color: blue')\n \n self.thermometer.value = temp\n self.thermometer.repaint()\n\nclass GSR():\n def __init__(self):\n pg.setConfigOption('background', 'w') #graph background color\n pg.setConfigOption('foreground', 'k') #graph foreground color\n self.graphWidget = pg.PlotWidget(title='Galvanic Skin Response') #pyqtgraph PlotWidget Class\n\n self.graphWidget.setLabel('left', \"GSR amplitude\", units='uS') # left label\n self.graphWidget.setLabel('bottom', \"Time\", units='Seconds') # bottom label\n\n # Get initial data\n self.seconds = [0] # seconds data array, x value\n self.gsrData = [self.getGsrSignal()] # temperature data array, y value\n\n self.graphWidget.plot(self.seconds, self.gsrData, clear=True) # plot initial value\n self.graphWidget.setRange(yRange=(0, 1.5)) # change the visible x range of the graph\n self.graphWidget.setDownsampling(mode='peak') # down sampling\n\n #Timer Setup, every second update the data\n self.timer = pg.QtCore.QTimer()\n self.timer.timeout.connect(self.update)\n self.timer.start(100)\n \n def getGsrSignal(self):\n return np.random.uniform(0.3, 1.3)\n\n def update(self):\n if len(self.gsrData) < 100: # first ten seconds\n gsrSignal = self.getGsrSignal()\n self.gsrData.append(gsrSignal)\n self.seconds.append(self.seconds[len(self.seconds) - 1] + 0.1)\n self.graphWidget.plot(self.seconds, self.gsrData, pen=(255,165,0), clear=True) # update plot\n \n else: # after ten seconds\n self.gsrData.pop(0)\n gsrSignal = self.getGsrSignal()\n self.gsrData.append(gsrSignal) #updating GSR signal\n\n self.graphWidget.plot(self.seconds, self.gsrData, pen=(255,165,0), clear=True) # update plot\n \n \ndef darkMode():\n dark_palette = QPalette()\n dark_palette.setColor(QPalette.Window, QColor(53, 53, 53))\n dark_palette.setColor(QPalette.WindowText, Qt.white)\n dark_palette.setColor(QPalette.Base, QColor(25, 25, 25))\n dark_palette.setColor(QPalette.AlternateBase, QColor(53, 53, 53))\n dark_palette.setColor(QPalette.ToolTipBase, Qt.white)\n dark_palette.setColor(QPalette.ToolTipText, Qt.black)\n dark_palette.setColor(QPalette.Text, Qt.white)\n dark_palette.setColor(QPalette.Button, QColor(53, 53, 53))\n dark_palette.setColor(QPalette.ButtonText, Qt.white)\n dark_palette.setColor(QPalette.BrightText, Qt.red)\n dark_palette.setColor(QPalette.Link, QColor(42, 130, 218))\n dark_palette.setColor(QPalette.Highlight, QColor(42, 130, 218))\n dark_palette.setColor(QPalette.HighlightedText, Qt.black)\n 
QApplication.setPalette(dark_palette)\n\nif __name__ == '__main__':\n TemperatureModule()","sub_path":"temperatureModule/temperature.py","file_name":"temperature.py","file_ext":"py","file_size_in_byte":11053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"558385537","text":"from Crypto.Cipher import AES\nimport random\nimport string\nimport base64\n\ndef randomString(stringLength=8):\n letters = string.ascii_lowercase\n return ''.join(random.choice(letters) for i in range(stringLength))\n\ndef byte_xor(ba1, ba2):\n return bytes([_a ^ _b for _a, _b in zip(ba1, ba2)])\n\ndef text_to_bits(text, encoding='utf-8', errors='surrogatepass'):\n bits = bin(int.from_bytes(text.encode(encoding, errors), 'big'))[2:]\n return bits.zfill(8 * ((len(bits) + 7) // 8))\n\ndef text_from_bits(bits, encoding='utf-8', errors='surrogatepass'):\n n = int(bits, 2)\n return n.to_bytes((n.bit_length() + 7) // 8, 'big').decode(encoding, errors) or '\\0'\n\ndef xor(s1,s2):\n out =''\n for i in range(0,len(s1)):\n out=out+str(1-abs(int(s1[i])-int(s2[i])))\n return out\n\ndef ecb_encrypt(message, key):\n aes = AES.new(key.encode('utf-8'), AES.MODE_ECB)\n return base64.b64encode(aes.encrypt(message.encode('utf-8'))).decode()\n\n\ndef ecb_decrypt(encrypted, key):\n aes = AES.new(key.encode('utf-8'), AES.MODE_ECB)\n return aes.decrypt(base64.b64decode(encrypted.encode('utf-8')))\n\n\ndef enc_ofb(in1,key, iv, enc):\n length=128\n key2=key[:32]\n tmp = text_to_bits(iv)[:length]\n leng = len(in1)\n out=\"\"\n i=0\n while(i+length<=leng):\n tmp = text_to_bits(enc(tmp,key2))\n tmp=tmp[12:length+12]\n tt=xor(tmp,in1[i:i+length])\n out = out + tt\n i=i+length\n\n if(i!=leng):\n tmp = text_to_bits(enc(tmp,key2))\n tmp=tmp[12:length+12]\n tt=xor(tmp,in1[i:i+length].ljust(length,'0'))\n out = out + tt\n\n\n return out\n\ndef enc_cbc(in1,key, iv, enc):\n length=128\n key2=key[:32]\n tmp = text_to_bits(iv)[:length]\n leng = len(in1)\n out=\"\"\n i=0\n while(i+length<=leng):\n tmp=xor(tmp,in1[i:i+length])\n tt=enc(tmp,key2)\n tt = text_to_bits(tt)\n out = out + tt\n tmp=tt[12:length+12]\n i=i+length\n if(i!=leng):\n tmp=xor(tmp,in1[i:i+length].ljust(length,'0'))\n tt=enc(tmp,key2)\n tt = text_to_bits(tt)\n out = out + tt\n\n return out\n\ndef dec_cbc(in1,key, iv, enc):\n length=128\n length2=172*8\n key2=key[:32]\n tmp = text_to_bits(iv)[:length]\n leng = len(in1)\n out=\"\"\n i=0\n while(i+length2<=leng):\n tmp2=in1[i+12:i+length+12]\n tt=enc(text_from_bits(in1[i:i+length2]),key2)\n tt =xor(tt.decode(\"utf-8\"),tmp)\n tmp=tmp2\n out = out + tt\n i=i+length2\n return out\n\ndef enc_ctr(in1,key, iv, enc):\n length=128\n key2=key[:32]\n tmp = iv\n counter=1\n leng = len(in1)\n out=\"\"\n i=0\n while(i+length<=leng):\n tt=enc(tmp+str(counter).rjust(16,'0'),key2)\n tt = text_to_bits(tt)\n tt=tt[12:length+12]\n tt =xor(tt,in1[i:i+length])\n out = out + tt\n counter+=1\n i=i+length\n if(i!=leng):\n tt=enc(tmp+str(counter).rjust(16,'0'),key2)\n tt = text_to_bits(tt)\n tt=tt[12:length+12]\n tt =xor(tt,in1[i:i+length].ljust(length,'0'))\n out = out + tt\n\n return out\n","sub_path":"lista2/fun.py","file_name":"fun.py","file_ext":"py","file_size_in_byte":3080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"372293589","text":"\"\"\"django_blog URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.2/topics/http/urls/\nExamples:\nFunction views\n 1. 
Add an import: from my_app import views\n    2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n    1. Add an import: from other_app.views import Home\n    2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.urls import include, path\n    2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\n# from django_blog.blog.models import Feedback\nfrom django.contrib import admin\nfrom django.urls import include, path\nfrom blog import views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nfrom .views import PostCreate, PostDetails, PostUpdateView, getCategoryPost, postList, PostDeleteView, saveComment, sendmail\n\nstaff_patterns = [\n    path('dashboard', views.dashboard, name=\"dashboard\"),\n    path('feedback', views.showFeedback, name=\"feedback\"),\n    path('save/feedback', views.saveFeedback, name=\"save_feedback\"),\n    path('categories', views.showCategories, name=\"categories\"),\n    path('category/form', views.categoryForm, name=\"add_category\"),\n    path('store/category', views.storeCategory, name=\"store_category\"),\n    path('delete/category/', views.deleteCategory, name=\"delete_category\"),\n    path('delete/feedback/', views.deleteFeedback, name=\"delete_feedback\"),\n    path('posts', postList.as_view(), name=\"posts\"),\n    path('create/post', PostCreate.as_view(), name=\"add_post\"),\n    path('view/post/', PostDetails.as_view(), name=\"view_post\"),\n    path('update/post/', PostUpdateView.as_view(), name=\"update_post\"),\n    path('delete/post/', PostDeleteView.as_view(), name=\"delete_post\"),\n    path('send/mail', views.sendmail, name ='send_mail')\n    \n    \n]\n\nurlpatterns = [\n    path('', views.home, name=\"home\"),\n    path('contact', views.contact, name=\"contact\"),\n    path('blog', views.blog, name=\"blog\"),\n    path('myposts',views.myposts, name=\"myposts\"),\n    path('dashboard',views.dashboard, name=\"dashboard\"),\n    path('post/details/', views.getPostDetails, name=\"post_details\"),\n    path('save/comment/', views.saveComment, name =\"save_comment\"),\n    path('posts/category/', views.getCategoryPost, name=\"category_post\"),\n    path('search', views.searchPosts, name=\"search\"),\n    path('staff/', include(staff_patterns)),\n    \n]\n\nif settings.DEBUG:\n    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\n","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"138650559","text":"import pytest\nimport requests\nimport mock\nimport string\nimport random\nimport re\nimport time\n\nfrom printnodeapi.gateway import Gateway\nfrom printnodeapi.model import Computer, Printer, PrintJob, State, Account\nfrom printnodeapi.auth import Auth\n\nfrom fixtures import *\n\nENTRY_SIZE = 5\n\ndef setup_module(module):\n    gateway = create_gateway()\n    gateway.TestDataGenerate()\n\ndef teardown_module(module):\n    gateway = create_gateway()\n    gateway.TestDataDelete()\n\n@pytest.mark.parametrize(\"gateway\", [(create_gateway())])\ndef test_create_delete_account(gateway):\n    time.sleep(1)\n    creator_ref = \"a_ref\"+get_random_string()\n    acc = gateway.CreateAccount(\n        firstname=\"Jake\",\n        lastname=\"Torrance\",\n        email=\"anemail@emails.emails\"+get_random_string(),\n        password=\"password\",\n        creator_ref=creator_ref,\n        tags={\"likes\": \"chicken\"})\n    assert creator_ref == 
acc[\"Account\"][\"creatorRef\"]\n acc_gateway = create_gateway(child_id=acc[\"Account\"][\"id\"])\n acc_gateway.DeleteAccount()\n with pytest.raises(Exception) as no_account_exception:\n acc_gateway.account\n assert no_account_exception\n\n\n@pytest.mark.parametrize(\"gateway\", [(create_gateway())])\ndef test_create_api_key(gateway):\n time.sleep(1)\n creator_ref = \"a_ref\"+get_random_string()\n acc = gateway.CreateAccount(\n firstname=\"Jake\",\n lastname=\"Torrance\",\n email=\"anemail@emails.emails\"+get_random_string(),\n password=\"password\",\n creator_ref=creator_ref,\n tags={\"likes\": \"chicken\"})\n acc_gateway = create_gateway(child_id=acc[\"Account\"][\"id\"])\n acc_gateway.CreateApiKey(api_key=creator_ref)\n assert acc_gateway.account.api_keys[creator_ref]\n acc_gateway.DeleteAccount()\n\n\n@pytest.mark.parametrize(\"gateway\", [(create_gateway())])\ndef test_get_api_key(gateway):\n time.sleep(1)\n creator_ref = \"a_ref\"+get_random_string()\n acc = gateway.CreateAccount(\n firstname=\"Jake\",\n lastname=\"Torrance\",\n email=\"anemail@emails.emails\"+get_random_string(),\n password=\"password\",\n creator_ref=creator_ref,\n tags={\"likes\": \"chicken\"})\n acc_gateway = create_gateway(child_id=acc[\"Account\"][\"id\"])\n acc_gateway.CreateApiKey(api_key=creator_ref)\n apikey = acc_gateway.account.api_keys[creator_ref]\n assert acc_gateway.api_key(api_key=creator_ref) == apikey\n acc_gateway.DeleteAccount()\n\n\n@pytest.mark.parametrize(\"gateway\", [(create_gateway())])\ndef test_delete_api_key(gateway):\n time.sleep(1)\n creator_ref = \"a_ref\"+get_random_string()\n acc = gateway.CreateAccount(\n firstname=\"Jake\",\n lastname=\"Torrance\",\n email=\"anemail@emails.emails\"+get_random_string(),\n password=\"password\",\n creator_ref=creator_ref,\n tags={\"likes\": \"chicken\"})\n acc_gateway = create_gateway(child_id=acc[\"Account\"][\"id\"])\n acc_gateway.CreateApiKey(api_key=creator_ref)\n acc_gateway.DeleteApiKey(api_key=creator_ref)\n assert acc_gateway.account.api_keys == []\n acc_gateway.DeleteAccount()\n\n\n@pytest.mark.parametrize(\"gateway\", [(create_gateway())])\ndef test_create_delete_tag(gateway):\n time.sleep(1)\n gateway.ModifyTag(\"likes\", \"chicken\")\n assert {\"likes\": \"chicken\"} == gateway.account.tags\n gateway.DeleteTag(\"likes\")\n assert [] == gateway.account.tags\n\n\n@pytest.mark.parametrize(\"gateway\", [(create_gateway())])\ndef test_modify_account(gateway):\n time.sleep(1)\n creator_ref = \"a_ref\"+get_random_string()\n acc = gateway.CreateAccount(\n firstname=\"Jake\",\n lastname=\"Torrance\",\n email=\"anemail@emails.emails\"+get_random_string(),\n password=\"password\",\n creator_ref=creator_ref,\n tags={\"likes\": \"chicken\"})\n acc_gateway = create_gateway(child_id=acc[\"Account\"][\"id\"])\n acc_gateway.ModifyAccount(firstname=\"NotJake\")\n assert \"NotJake\" == acc_gateway.account.firstname\n acc_gateway.DeleteAccount()\n\n\n@pytest.mark.parametrize(\"gateway\", [(create_gateway())])\ndef test_get_clientkey(gateway):\n time.sleep(1)\n creator_ref = \"a_ref\"+get_random_string()\n acc = gateway.CreateAccount(\n firstname=\"Jake\",\n lastname=\"Torrance\",\n email=\"anemail@emails.emails\"+get_random_string(),\n password=\"password\",\n creator_ref=creator_ref,\n tags={\"likes\": \"chicken\"})\n acc_gateway = create_gateway(child_id=acc[\"Account\"][\"id\"])\n response = acc_gateway.clientkey(\n uuid=\"0a756864-602e-428f-a90b-842dee47f57e\",\n edition=\"printnode\",\n version=\"4.7.2\")\n regex = re.compile('^ck.*')\n assert 
regex.match(response) is not None\n acc_gateway.DeleteAccount()\n\n\n@pytest.mark.parametrize(\"gateway\", [(create_gateway())])\ndef test_get_clients(gateway):\n time.sleep(1)\n all_clis = gateway.clients()\n assert all_clis[0].edition == \"tyrell\"\n some_clis = gateway.clients(client_ids=\"10-15\")\n ver10 = False\n ver15 = False\n for v in some_clis:\n if v.id == 10:\n ver10 = True\n elif v.id == 15:\n ver15 = True\n assert ver10 and ver15\n recent_cli = gateway.clients(os=\"windows\")\n assert recent_cli.os == \"windows\"\n\n\n@pytest.mark.parametrize(\"gateway\", [(create_gateway())])\ndef test_modify_downloads(gateway):\n time.sleep(1)\n gateway.ModifyClientDownloads(17, False)\n cli_17 = gateway.clients(client_ids=\"17\")\n assert not cli_17[0].enabled\n","sub_path":"tests/test_accounts.py","file_name":"test_accounts.py","file_ext":"py","file_size_in_byte":5397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"98894196","text":"\"\"\"General-purpose training script for image-to-image translation.\n\nThis script works for various models (with option '--model': e.g., pix2pix, cyclegan, colorization) and\ndifferent datasets (with option '--dataset_mode': e.g., aligned, unaligned, single, colorization).\nYou need to specify the dataset ('--dataroot'), experiment name ('--name'), and model ('--model').\n\nIt first creates model, dataset, and visualizer given the option.\nIt then does standard network training. During the training, it also visualize/save the images, print/save the loss plot, and save models.\nThe script supports continue/resume training. Use '--continue_train' to resume your previous training.\n\nExample:\n Train a CycleGAN model:\n python train.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan\n Train a pix2pix model:\n python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA\n\nSee options/base_options.py and options/train_options.py for more training options.\nSee training and test tips at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md\nSee frequently asked questions at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md\n\"\"\"\nimport time\nimport torch\nimport numpy as np\nfrom os.path import join\nfrom options.train_options import TrainOptions\nfrom data import create_dataset\nfrom models import create_model\nfrom util.visualizer import Visualizer\nfrom collections import OrderedDict\n\ndef sum_loss(epoch_loss, batch_loss, iter):\n w = min(iter * opt.batch_size, dataset_size_train) - (iter - 1) * opt.batch_size\n if len(epoch_loss.keys()) == 0:\n for k in batch_loss:\n epoch_loss[k] = w * batch_loss[k]\n else:\n for k in batch_loss:\n epoch_loss[k] += w * batch_loss[k]\n return epoch_loss\n\n\nif __name__ == '__main__':\n train_opt = TrainOptions()\n\n train_opt.parser.add_argument('--datamode', type=str, default='multipie', help='data mode: multipie or lfw')\n train_opt.parser.add_argument('--crop', action='store_true', help='center crop face, for calculate the identity loss')\n train_opt.parser.add_argument('--flownetf', type=str, default='./checkpoints/flownetf/4_net_flowNet.pth', help='the path to pretrained flownetf model')\n train_opt.parser.add_argument('--flownetb', type=str, default='./checkpoints/flownetb/4_net_flowNet.pth', help='the path to pretrained flownetb model')\n train_opt.parser.add_argument('--lightcnn', type=str, default='./checkpoints/lightCNN_10_checkpoint.pth', 
help='the path to pretrained lightcnn model')\n train_opt.parser.add_argument('--aug', action='store_true', help='data augment')\n opt = train_opt.parse() # get training options\n\n # create dataset given opt.dataset_mode\n dataset_train = create_dataset(opt, is_val=False)\n\n # get the number of images in the dataset.\n dataset_size_train = len(dataset_train)\n print('The number of training images = %d' % dataset_size_train)\n torch.set_num_threads(4)\n\n model = create_model(opt) # create a model given opt.model\n model.setup(opt) # regular setup: load and print networks; create schedulers\n visualizer = Visualizer(opt) # create a visualizer that display/save images and plots\n total_iters = 0 # the total number of training iterations\n total_steps = 0\n for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):\n epoch_start_time = time.time() # timer for entire epoch\n iter_data_time = time.time() # timer for data loading per iteration\n epoch_iter = 0 # the number of training iterations in current epoch, reset to 0 every epoch\n epoch_loss = OrderedDict()\n model.set_train()\n for i, data in enumerate(dataset_train): # inner loop within one epoch\n iter_start_time = time.time() # timer for computation per iteration\n if total_iters % opt.print_freq == 0:\n t_data = iter_start_time - iter_data_time\n visualizer.reset()\n total_iters += opt.batch_size\n epoch_iter += opt.batch_size\n data['titers'] = total_iters\n data['epoch'] = epoch\n model.set_input(data) # unpack data from dataset and apply preprocessing\n model.optimize_parameters() # calculate loss functions, get gradients, update network weights\n epoch_loss = sum_loss(epoch_loss, model.get_current_losses(), i + 1)\n if (i + 1) % opt.display_freq == 0: # display images on visdom and save images to a HTML file\n save_result = True\n model.compute_visuals()\n visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)\n if (i + 1) % opt.print_freq == 0: # print training losses and save logging information to the disk\n losses = model.get_current_losses()\n total_steps += 1\n t_comp = (time.time() - iter_start_time) / opt.batch_size\n visualizer.print_current_losses(epoch, epoch_iter, losses, t_comp, t_data, total_steps)\n iter_data_time = time.time()\n model.save_networks('latest')\n if (epoch % opt.save_epoch_freq == 0 and opt.save_epoch_freq > 0 ): # cache our model every epochs\n print('saving the model at the end of epoch %d, iters %d' % (epoch, total_iters))\n model.save_networks(epoch)\n for k in epoch_loss:\n epoch_loss[k] /= (dataset_size_train * 1.0)\n visualizer.print_current_losses(epoch, -1, epoch_loss, 0.0, 0.0, 0)\n print('End of epoch %d / %d \\t Time Taken: %d sec' % (\n epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))\n model.update_learning_rate() # update learning rates at the end of every epoch.","sub_path":"train_ffwm.py","file_name":"train_ffwm.py","file_ext":"py","file_size_in_byte":5873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"111070364","text":"import json\nimport sys\ndef powerset(seq):\n if len(seq) <= 1:\n yield seq\n yield []\n else:\n for item in powerset(seq[1:]):\n yield [seq[0]]+item\n yield item\narglist = sys.argv\n#print(arglist)\nfile = open(arglist[1])\nnfa = json.load(file)\n#Describing DFA\n#print(nfa)\nnfatransfun = {}\ndfatransfun = {}\nfor transition in nfa[\"transition_function\"]:\n\t#print(transition)\n\tif (transition[0],transition[1]) in 
nfatransfun:\n\t\t#print(transition[0]+transition[1]+transition[2])\n\t\tnfatransfun[(transition[0],transition[1])].append(transition[2])\n\telse:\n\t\tnfatransfun[(transition[0],transition[1])] = transition[2].split()\n#print(nfatransfun)\nstartstate = nfa[\"states\"]\nallstates = [x for x in powerset(startstate)]\n#print(startstate)\n#allstates = allstates.sort()\n#print(allstates)\nfor state in allstates:\n\t#print(state)\n\tif(state==[]):\n\t\tfor letter in nfa[\"letters\"]:\n\t\t\tdfatransfun[(tuple(state),letter)]=state\n\telse:\n\t\t#print(state)\n\t\tfor letter in nfa[\"letters\"]:\n\t\t\tuniquestatereached=[]\n\t\t\t#print(letter)\n\t\t\tfor eachstate in state:\n\t\t\t\tif (eachstate,letter) in nfatransfun:\n\t\t\t\t\t#print(nfatransfun[(eachstate,letter)])\n\t\t\t\t\t\t#print(opchar)\n\t\t\t\t\tif nfatransfun[(eachstate,letter)] not in uniquestatereached:\n\t\t\t\t\t\t#print(uniquestatereached)\n\t\t\t\t\t\tuniquestatereached.append(nfatransfun[(eachstate,letter)])\n\t\t\tdfatransfun[(tuple(state),letter)] = uniquestatereached\n#print(dfatransfun)\n#print(\"**********************************\")\ndfatfinal=[]\nfor k,val in dfatransfun.items():\n\tif(val!=[]):\n\t\tval=val[0]\n\telist =[[list(k[0]),k[1],val]]\n\t#print(elist)\n\tdfatfinal.extend(elist)\n#print(dfatfinal)\n#final states\ndfatfinalstates=[]\nfor state in allstates:\n\tflag=False\n\tfor eachstate in state:\n\t\tif eachstate in nfa[\"final_states\"]:\n\t\t\tflag=True\n\tif flag:\n\t\tdfatfinalstates.append(state)\ndfa = {}\ndfa[\"states\"] = allstates\ndfa[\"letters\"]=nfa[\"letters\"]\ndfa[\"transition_function\"]=dfatfinal\ndfa[\"start_states\"]= nfa[\"start_states\"]\ndfa[\"final_states\"]=dfatfinalstates\noutputfile = open(arglist[2],'w+')\njson.dump(dfa,outputfile,separators = (',\\t' , ':'),indent =4)\n'''Q = []\nQ.append((nfa[\"start_states\"],))\n#phi is represented as Dead state D\nfor state in Q:\n\tprint(len(state))\n\tfor letter in nfa[\"letters\"]:\n\t\tfor i in range(len(state)):\n\t\t\tstatereached=[]\n\t\t\tuniquestatereached=[]\n\t\t\tfor estate in state:\n\t\t\t\tif(estate,letter) in nfatransfun:\n\t\t\t\t\tfor echar in nfatransfun[(estate,letter)]:\n\t\t\t\t\t\tif echar not in uniquestatereached:\n\t\t\t\t\t\t\tuniquestatereached.append(echar)\n\t\tif uniquestatereached:\n\t\t\tdfatransfun[(state,letter)]=uniquestatereached\n\t\t\tif tuple(uniquestatereached) not in Q:\n\t\t\t\tQ.append(tuple(uniquestatereached))\t\t\n\t\telse:\n\t\t\tdfatransfun[(state,letter)]=\"D\"\ndfatfinal=[]\nfor k,val in dfatransfun.items():\n\telist = [[k[0]],k[1],val]\n\tdfatfinal.extend(elist)\ndfa = {}\ndfa[\"states\"]= 2 ** nfa[\"states\"]\ndfa[\"letters\"]=nfa[\"letters\"]\ndfa[\"transition_function\"]=dfatfinal\nopfile = open('op.json','w+')\n#json.dump(dfa,opfile,separators=(',\\t',':'))\nprint(json.dumps(dfa,separators=(',\\t',':'),indent=2))'''","sub_path":"NFA2DFA.py","file_name":"NFA2DFA.py","file_ext":"py","file_size_in_byte":3055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"80151488","text":"from numpy import linspace\n\nclass Scope:\n def __init__(self, address):\n self.id = id(self)\n self.address = address\n self.channels = []\n self.channel_types = []\n\n def add_channel(self, channel):\n channel.scope = self\n\n channel_number = sum(type(ch) == type(channel) for ch in self.channels)\n\n if channel_number:\n channel.name = f'{channel.name}{channel_number}'\n\n self.channels.append(channel)\n\n def get_channel_by_name(self, name):\n return 
next(filter(\n lambda channel: channel.name == name,\n self.channels))\n\n def get_channel_type_by_name(self, name):\n return next(filter(\n lambda channel_type: channel_type.__name__ == name,\n self.channel_types))\n","sub_path":"backend/scope.py","file_name":"scope.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"77892518","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\ndef helper(root, s, curr):\n if root:\n curr = curr * 10 + root.val\n if not root.left and not root.right:\n s[0] += curr\n helper(root.left, s, curr)\n helper(root.right, s, curr)\n return s[0]\n return 0\n \n\nclass Solution:\n def sumNumbers(self, root):\n # # SOLUTION 1 - Recursive Preorder Traversal\n # # Time O(n), Space O(h)\n\n # # We can build the number starting from the root down\n # # When we get to a leaf we can add the number to\n # # the sum.\n\n # return helper(root, [0], 0)\n \n # SOLUTION 2 - Iterative Preorder Traversal\n if not root:\n return 0\n \n stack, s = [(root, 0)], 0\n \n while stack:\n node, curr = stack.pop()\n curr = curr * 10 + node.val\n \n if not node.right and not node.left:\n s += curr\n continue\n \n if node.right:\n stack.append((node.right, curr))\n \n if node.left:\n stack.append((node.left, curr))\n \n return s\n\n","sub_path":"0129-Sum-Root-to-Leaf-Numbers/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"467570701","text":"#coding=utf8\n#coding=utf8\n#coding=utf8\n#coding=utf8\nimport os\nimport time\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.cluster import DBSCAN\nfrom sklearn.cluster import KMeans\nfrom sklearn.cluster import MeanShift\nfrom sklearn.cluster import AgglomerativeClustering\nfrom sklearn.cluster import Birch\nfrom sklearn.cluster import estimate_bandwidth\nfrom sklearn.cluster import AffinityPropagation\n\nfrom sklearn.metrics import silhouette_score\nfrom sklearn.impute import SimpleImputer\nfrom hyperopt import fmin,tpe,hp,partial\nfrom sklearn.externals import joblib\nfrom sklearn.decomposition import PCA\nimport pymongo\nimport sys\n\n#读取数据文件后构建文件的类\nclass Bunch(dict):\n def __init__(self, **kwargs):\n dict.__init__(self, kwargs)\n\n def __setattr__(self, key, value):\n self[key] = value\n\n def __getattr__(self, key):\n try:\n return self[key]\n except KeyError:\n raise AttributeError(key)\n\n def __getstate__(self):\n return self.__dict__\n\n#评估标准,采取的是轮廓系数评价指标\ndef sil_score(data,labels):\n try:\n score = silhouette_score(data,labels)\n except Exception as err:\n # print(err)\n #报错返回 -1\n return -1\n else:\n return score\n\n#读取文件操作\ndef load_data(filename):\n print(filename)\n data_file = pd.read_excel(filename)\n\n #对于分类的数据集,直接取出最后一列标签向量\n # data_file.drop([data_file.columns.values[-1]], axis = 1, inplace = True)\n\n data_file = pd.get_dummies(data_file) #处理字符串\n\n imr = SimpleImputer(missing_values=np.nan, strategy='mean') #处理空缺值\n imr = imr.fit(data_file)\n data = imr.transform(data_file.values)\n n_samples = data.shape[0]\n\n feature_names = data_file.columns.values\n n_features = feature_names.shape[0]\n\n return Bunch(sample=n_samples, features=n_features, data=data,\n feature_names=feature_names)\n\n#选择算法操作,返回最好的模型参数,预测标签,得分情况,最好的模型(用于存储)\ndef select_algorithm(data,algorithm):\n pre_label = []\n sil_score = 
# File reading\ndef load_data(filename):\n print(filename)\n data_file = pd.read_excel(filename)\n\n # For a classification dataset, simply drop the final column (the label vector)\n # data_file.drop([data_file.columns.values[-1]], axis = 1, inplace = True)\n\n data_file = pd.get_dummies(data_file) # handle string columns\n\n imr = SimpleImputer(missing_values=np.nan, strategy='mean') # handle missing values\n imr = imr.fit(data_file)\n data = imr.transform(data_file.values)\n n_samples = data.shape[0]\n\n feature_names = data_file.columns.values\n n_features = feature_names.shape[0]\n\n return Bunch(sample=n_samples, features=n_features, data=data,\n feature_names=feature_names)\n\n# Algorithm selection: returns the best model parameters, the predicted labels, the score and the best model (for saving)\ndef select_algorithm(data,algorithm):\n pre_label = []\n sil_score = 0.0\n best = {}\n try:\n if algorithm == 'dbscan':\n best,pre_label, sil_score,model = dbscan(data)\n elif algorithm == 'kmeans':\n best,pre_label, sil_score,model = kmeans(data)\n elif algorithm == 'meanshift':\n best,pre_label, sil_score,model = meanshift(data)\n elif algorithm == 'agglomerative':\n best,pre_label, sil_score,model = agglomerative(data)\n elif algorithm == 'ward':\n best,pre_label, sil_score,model = ward(data)\n elif algorithm == 'birch':\n best,pre_label, sil_score,model = birch(data)\n elif algorithm == 'affinity':\n best,pre_label, sil_score,model = affinity(data)\n except Exception as err:\n return {},[],err,[]\n else:\n return best,pre_label, sil_score,model\n\n# Clustering algorithms\n# DBSCAN\ndef hyper_dbscan(args):\n global data_file\n db = DBSCAN(eps = args[\"eps\"], min_samples = int(args[\"min_samples\"]), metric = args[\"metric\"],n_jobs=-1)\n pred = db.fit_predict(data_file.data)\n temp = sil_score(data_file.data,pred)\n return -temp\n\ndef dbscan(data):\n metric_list = ['euclidean','manhattan','chebyshev']\n # For small datasets, bound the search range by the sample count\n if data.shape[0] < 30:\n space = {\n \"eps\": hp.uniform(\"eps\", 0, 2),\n \"min_samples\": hp.choice(\"min_samples\", range(2, data.shape[0]-1)),\n \"metric\": hp.choice(\"metric\", metric_list)\n }\n else:\n space = {\n \"eps\": hp.uniform(\"eps\", 0, 2),\n \"min_samples\": hp.choice(\"min_samples\", range(2, 30)),\n \"metric\": hp.choice(\"metric\", metric_list)\n }\n algo = partial(tpe.suggest,n_startup_jobs = 10)\n best = fmin(hyper_dbscan, space, algo = algo, max_evals = 50)\n model = DBSCAN(eps = best[\"eps\"], min_samples = int(best[\"min_samples\"]+2), metric = metric_list[best[\"metric\"]])\n return best,model.fit_predict(data),sil_score(data,model.fit_predict(data)),model.fit(data)\n\n# KMeans\ndef hyper_kmeans(args):\n global data_file\n km = KMeans(n_clusters = int(args[\"n_iter\"]),init = 'k-means++',n_init = 10,max_iter = 300,random_state = 0,n_jobs=-1)\n km.fit(data_file.data)\n pred = km.predict(data_file.data)\n temp = sil_score(data_file.data,pred)\n return -temp\n\ndef kmeans(data):\n if data.shape[0] < 30:\n space = {\n \"n_iter\": hp.choice(\"n_iter\", range(1, data.shape[0]))\n }\n else:\n space = {\n \"n_iter\": hp.choice(\"n_iter\", range(1, 30))\n }\n algo = partial(tpe.suggest, n_startup_jobs=10)\n best = fmin(hyper_kmeans, space, algo=algo, max_evals = 50)\n model = KMeans(n_clusters = int(best[\"n_iter\"]+1),init = 'k-means++',n_init = 10,max_iter = 300,random_state = 0)\n model.fit(data)\n return best,model.predict(data),sil_score(data,model.predict(data)),model\n\n
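# --- Added note (illustration only): fmin() reports hp.choice parameters as\n# *indices* into the candidate list, which is why the functions above rebuild\n# the final model with offsets such as int(best[\"n_iter\"]+1) or\n# int(best[\"min_samples\"]+2). hyperopt's space_eval() resolves the indices\n# back to actual values, e.g.:\n#\n# from hyperopt import space_eval\n# space_eval({\"n_iter\": hp.choice(\"n_iter\", range(1, 30))}, {\"n_iter\": 0})\n# # -> {\"n_iter\": 1}\n\n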
# MeanShift\ndef hyper_meanshift(args):\n global data_file\n ms = MeanShift(bandwidth = args['bandwidth'],min_bin_freq = int(args['min_bin_freq']),n_jobs=-1)\n pred = ms.fit_predict(data_file.data)\n temp = sil_score(data_file.data,pred)\n return -temp\n\ndef meanshift(data):\n bandwidth = estimate_bandwidth(data)\n if(bandwidth - bandwidth/2) <0 and (bandwidth + bandwidth/2) >0:\n space = {\n 'bandwidth': hp.uniform('bandwidth', 0, bandwidth + bandwidth / 2),\n 'min_bin_freq': hp.choice('min_bin_freq', range(1, 30))\n }\n elif (bandwidth + bandwidth/2) <=0 :\n space = {\n 'bandwidth': hp.uniform('bandwidth', 0.1, 1.5),\n 'min_bin_freq': hp.choice('min_bin_freq', range(1, 30))\n }\n else:\n space = {\n 'bandwidth': hp.uniform('bandwidth', bandwidth - bandwidth / 2, bandwidth + bandwidth / 2),\n 'min_bin_freq': hp.choice('min_bin_freq', range(1, 30))\n }\n algo = partial(tpe.suggest,n_startup_jobs = 10)\n if data.shape[0] <1000:\n best = fmin(hyper_meanshift, space, algo=algo, max_evals=100)\n else:\n best = fmin(hyper_meanshift, space, algo=algo, max_evals=30)\n model = MeanShift(bandwidth = best['bandwidth'], min_bin_freq = int(best['min_bin_freq']+1))\n return best,model.fit_predict(data),sil_score(data,model.fit_predict(data)),model.fit(data)\n\n# Agglomerative clustering; three linkage modes: min (single), max (complete) and avg (average)\ndef hyper_agglomerative(args):\n global data_file\n ag = AgglomerativeClustering(n_clusters = int(args['n_clusters']), affinity = args['affinity'], linkage = args['linkage'])\n pred = ag.fit_predict(data_file.data)\n temp = sil_score(data_file.data,pred)\n return -temp\n\ndef agglomerative(data):\n linkage_list = ['complete', 'average', 'single']\n affinity_list = ['euclidean', 'l1', 'l2', 'manhattan', 'cosine']\n if data.shape[0] < 30:\n space = {\n 'n_clusters': hp.choice('n_clusters', range(1, data.shape[0])),\n 'affinity': hp.choice('affinity', affinity_list),\n 'linkage': hp.choice('linkage', linkage_list)\n }\n else:\n space = {\n 'n_clusters': hp.choice('n_clusters', range(1, 30)),\n 'affinity': hp.choice('affinity', affinity_list),\n 'linkage': hp.choice('linkage', linkage_list)\n }\n algo = partial(tpe.suggest,n_startup_jobs = 10)\n best = fmin(hyper_agglomerative, space, algo = algo, max_evals =50)\n model = AgglomerativeClustering(n_clusters = int(best['n_clusters']+1), affinity = affinity_list[best['affinity']], linkage = linkage_list[best['linkage']])\n return best,model.fit_predict(data),sil_score(data,model.fit_predict(data)),model.fit(data)\n\n# ward: AgglomerativeClustering with the ward linkage\ndef hyper_ward(args):\n global data_file\n wd = AgglomerativeClustering(n_clusters=int(args['n_clusters']), affinity='euclidean', linkage='ward')\n pred = wd.fit_predict(data_file.data)\n temp = sil_score(data_file.data, pred)\n return -temp\n\ndef ward(data):\n if data.shape[0] < 30:\n space = {\n 'n_clusters': hp.choice('n_clusters', range(1, data.shape[0])),\n }\n else:\n space = {\n 'n_clusters': hp.choice('n_clusters', range(1, 30)),\n }\n algo = partial(tpe.suggest, n_startup_jobs=10)\n best = fmin(hyper_ward, space, algo=algo, max_evals=50)\n model = AgglomerativeClustering(n_clusters=int(best['n_clusters'] + 1), affinity='euclidean',\n linkage='ward')\n return best,model.fit_predict(data), sil_score(data, model.fit_predict(data)),model.fit(data)\n\n# Birch\ndef hyper_birch(args):\n global data_file\n bir = Birch(threshold=args['threshold'], branching_factor=int(args['branching_factor']))\n pred = bir.fit_predict(data_file.data)\n temp = sil_score(data_file.data, pred)\n return -temp\n\ndef birch(data):\n space = {\n 'threshold': hp.uniform('threshold', 0, 1),\n 'branching_factor': hp.choice('branching_factor', range(25,75)),\n }\n algo = partial(tpe.suggest, n_startup_jobs=10)\n best = fmin(hyper_birch, space, algo=algo, max_evals=50)\n model = Birch(threshold=best['threshold'], branching_factor=int(best['branching_factor'] + 25))\n return best,model.fit_predict(data), sil_score(data, model.fit_predict(data)),model.fit(data)\n\n# Affinity propagation\ndef hyper_affinity(args):\n global data_file\n ap = AffinityPropagation(damping = args['damping'])\n pred = ap.fit_predict(data_file.data)\n temp = sil_score(data_file.data, pred)\n return -temp\n\ndef affinity(data):\n space = {\n 'damping': hp.uniform('damping',0.5, 0.99)\n }\n algo = partial(tpe.suggest, n_startup_jobs=10)\n best = fmin(hyper_affinity, space, algo=algo, max_evals=30)\n model = AffinityPropagation(damping = best['damping'])\n return best,model.fit_predict(data), sil_score(data, model.fit_predict(data)),model.fit(data)\n\n
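# --- Added aside (illustration only): unlike KMeans, AffinityPropagation\n# infers the number of clusters from the data, so only damping is tuned above;\n# sklearn requires damping to lie in [0.5, 1), which the search space already\n# enforces. For a data matrix X,\n# len(AffinityPropagation(damping=0.9).fit(X).cluster_centers_indices_)\n# gives the number of exemplars found.\n\n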
# PCA dimensionality reduction\n# Reduce the data to two dimensions with PCA so that it can be shown in a coordinate system, completing the visualisation\ndef visualization(data):\n print('Starting PCA')\n pca=PCA(n_components=2)\n X=pca.fit_transform(data)\n # print(X)\n print('End PCA')\n return X\n\n# Connect to the MongoDB database\ndef Connectdatabase():\n conn=pymongo.MongoClient(host=\"localhost\",port=27017)\n db=conn.MongoDB_Data\n return db\n\n# Insert the results into the database\ndef insert(file_name,file_path,alg,username):\n algorithm_list = ['dbscan', 'kmeans', 'meanshift', 'agglomerative', 'ward', 'birch', 'affinity']\n\n date = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n user_name = username\n file_name = file_name\n algorithm_name = alg\n data = data_file.data\n feature_names = data_file.feature_names\n feature_count = data_file.features\n xy = visualization(data)\n\n start = time.time()\n parameter_list, label, score, model = select_algorithm(data, alg)\n print(label)\n end = time.time()\n\n index_temp = algorithm_list.index(alg)\n\n df_regression = pd.DataFrame(index=[file_name], columns=algorithm_list)\n df_regression_best = pd.DataFrame(index=[file_name], columns=algorithm_list)\n df_regression_label = pd.DataFrame(index=[file_name], columns=algorithm_list)\n df_regression_time = pd.DataFrame(index=[file_name], columns=algorithm_list)\n df_regression.values[0][index_temp] = score\n df_regression_best.values[0][index_temp] = parameter_list\n df_regression_label.values[0][index_temp] = label.astype(int).tolist()\n df_regression_time.values[0][index_temp] = end - start\n\n df_regression.to_csv(\n 'D:\\\\superlloy\\\\automl\\\\cluster\\\\selection\\\\cluster_result\\\\' + file_name.rstrip(\n '.xls') + '_' + alg + '_result.csv')\n df_regression_best.to_csv(\n 'D:\\\\superlloy\\\\automl\\\\cluster\\\\selection\\\\cluster_best\\\\' + file_name.rstrip(\n '.xls') + '_' + alg + '_best.csv')\n df_regression_label.to_csv(\n 'D:\\\\superlloy\\\\automl\\\\cluster\\\\selection\\\\cluster_label\\\\' + file_name.rstrip(\n '.xls') + '_' + alg + '_label.csv')\n df_regression_time.to_csv(\n 'D:\\\\superlloy\\\\automl\\\\cluster\\\\selection\\\\cluster_time\\\\' + file_name.rstrip(\n '.xls') + '_' + alg + '_time.csv')\n\n
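 # --- Added caveat (not from the original author): str.rstrip('.xls') strips a\n # *set of characters* from the right, not a suffix, so names ending in 'x',\n # 'l' or 's' can be mangled: 'atlas.xls'.rstrip('.xls') == 'atla';\n # os.path.splitext(file_name)[0] would be the robust alternative.\n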
 try:\n joblib.dump(model, 'D:\\\\superlloy\\\\automl\\\\cluster\\\\selection\\\\cluster_model\\\\' +\n file_name.rstrip('.xls') + '_' + alg + '_model.m')\n except Exception as err:\n print(err)\n\n # print(date,user_name,file_name,algorithm_name,data,result_label,score,feature_names,feature_count,parameter_list,xy)\n\n print('Insert DataBase:')\n db = Connectdatabase()\n db.ClusterModel.insert({\n \"date\": date,\n \"user_name\": user_name,\n \"file_name\": file_name,\n \"algorithm_name\": algorithm_name,\n \"data\": data.astype(np.float64).tolist(),\n \"result_label\": label.astype(np.float64).tolist(),\n \"score\": score,\n \"feature_names\": feature_names.astype('object').tolist(),\n \"feature_count\": feature_count,\n # \"parameter_list\": parameter_list,\n \"xy\": xy.astype(np.float64).tolist()\n\n })\n print('End Insert')\n\nif __name__ == '__main__':\n #python .py file_name0 file_path1 alg2 username3\n # file_name = 'auto-test.xlsx'\n # # file_path = '/Users/buming/Documents/Super_Alloy/SuperAlloy_System/superalloy/data/piki/auto-test.xlsx'\n # file_path = '/Users/buming/Documents/Super_Alloy/DataSet/datasets/train_done/cmc.xls'\n # alg = 'kmeans'\n # username = 'piki'\n #\n # data_file = load_data(file_path)\n # # global data_file\n #\n # insert(file_name,file_path,alg,username)\n # print(1)\n length_argv=len(sys.argv)\n print(length_argv)\n\n parameterlist = []\n for i in range(1, len(sys.argv)):\n para = sys.argv[i]\n parameterlist.append(para)\n print(parameterlist)\n\n data_file = load_data(parameterlist[1])\n insert(parameterlist[0], parameterlist[1], parameterlist[2], parameterlist[3])\n\n","sub_path":"superlloy/python/automlclusterselection/Database_Auto_Cluster.py","file_name":"Database_Auto_Cluster.py","file_ext":"py","file_size_in_byte":14012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"583980545","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/philewels/GitHub/MultiQC/multiqc/modules/rseqc/rseqc.py\n# Compiled at: 2019-11-20 10:26:16\n\"\"\" MultiQC module to parse output from RSeQC \"\"\"\nfrom collections import OrderedDict\nimport logging\nfrom multiqc import config\nfrom multiqc.modules.base_module import BaseMultiqcModule\nlog = logging.getLogger(__name__)\n\nclass MultiqcModule(BaseMultiqcModule):\n \"\"\" RSeQC is a collection of scripts. This MultiQC module\n supports some but not all. The code for each script is split\n into its own file and adds a section to the module output if\n logs are found.\"\"\"\n\n def __init__(self):\n super(MultiqcModule, self).__init__(name='RSeQC', anchor='rseqc', href='http://rseqc.sourceforge.net/', info='package provides a number of useful modules that can comprehensively evaluate high throughput RNA-seq data.')\n self.general_stats_headers = OrderedDict()\n self.general_stats_data = dict()\n n = dict()\n rseqc_sections = getattr(config, 'rseqc_sections', [])\n if len(rseqc_sections) == 0:\n rseqc_sections = ['read_distribution',\n 'gene_body_coverage',\n 'inner_distance',\n 'read_gc',\n 'read_duplication',\n 'junction_annotation',\n 'junction_saturation',\n 'infer_experiment',\n 'bam_stat']\n for sm in rseqc_sections:\n try:\n module = __import__(('multiqc.modules.rseqc.{}').format(sm), fromlist=[''])\n n[sm] = getattr(module, 'parse_reports')(self)\n if n[sm] > 0:\n log.info(('Found {} {} reports').format(n[sm], sm))\n except (ImportError, AttributeError):\n log.warn((\"Could not find RSeQC Section '{}'\").format(sm))\n\n if sum(n.values()) == 0:\n raise UserWarning\n self.general_stats_addcols(self.general_stats_data, self.general_stats_headers)","sub_path":"pycfiles/multiqc-1.8.tar/rseqc.py","file_name":"rseqc.py","file_ext":"py","file_size_in_byte":2086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"366403577","text":"import numpy as np\nfrom theano import tensor as T\n\nimport keras.backend as K\nfrom custom_layers.sampling_layer import Sampling\nfrom keras.layers import Lambda, Conv1D, Conv2DTranspose, Embedding, Input, BatchNormalization, Activation, Flatten, \\\n Dense, Reshape\nfrom keras.models import Model\n\n\ndef vae_model(config_data, vocab, step):\n z_size = config_data['z_size']\n sample_in_size = config_data['max_input_length']\n sample_out_size = config_data['max_output_length']\n nclasses = len(vocab) + 2\n #last available index is reserved as start character\n intermediate_dim = config_data['intermediate_dim']\n nfilter = 128\n out_size = 200\n eps = 0.001\n anneal_start = 1000.0\n anneal_end = anneal_start + 7000.0\n # == == == == == =\n # Define Encoder\n # == == == == == =\n input_idx = Input(batch_shape=(None, sample_in_size), dtype='float32', name='character_input')\n output_idx = Input(batch_shape=(None, 
sample_out_size), dtype='int32', name='character_output')\n\n one_hot_weights = np.identity(nclasses)\n #oshape = (batch_size, sample_size, nclasses)\n one_hot_embeddings = Embedding(\n input_length=sample_in_size,\n input_dim=nclasses,\n output_dim=nclasses,\n weights=[one_hot_weights],\n trainable=False,\n name='one_hot_embeddings'\n )\n\n input_one_hot_embeddings = one_hot_embeddings((input_idx))\n #oshape = (batch_size, sample_size/2, 128)\n conv1 = Conv1D(filters=nfilter, kernel_size=3, strides=2, padding='same')(input_one_hot_embeddings)\n bn1 = BatchNormalization()(conv1)\n relu1 = Activation(activation='relu')(bn1)\n # oshape = (batch_size, sample_size/4, 128)\n conv2 = Conv1D(filters=2*nfilter, kernel_size=3, strides=2, padding='same')(relu1)\n bn2 = BatchNormalization()(conv2)\n relu2 = Activation(activation='relu')(bn2)\n #oshape = (batch_size, sample_size/4*256)\n flatten = Flatten()(relu2)\n #need to store the size of the representation after the convolutions -> needed for deconv later\n hidden_intermediate_enc = Dense(intermediate_dim, activation='relu', name='intermediate_encoding')(flatten)\n hidden_mean = Dense(z_size, name='mu')(hidden_intermediate_enc)\n hidden_log_sigma = Dense(z_size, name='sigma')(hidden_intermediate_enc)\n\n sampling_object = Sampling(z_size)\n sampling = sampling_object([hidden_mean, hidden_log_sigma])\n\n # == == == == == =\n # Define Decoder\n # == == == == == =\n hidden_intermediate_dec = Dense(intermediate_dim, name='intermediate_decoding')(sampling)\n decoder_upsample = Dense(int(2*nfilter*sample_out_size/4))(hidden_intermediate_dec)\n if K.image_data_format() == 'channels_first':\n output_shape = (2*nfilter, int(sample_out_size/4), 1)\n else:\n output_shape = (int(sample_out_size/4), 1, 2*nfilter)\n reshape = Reshape(output_shape)(decoder_upsample)\n #shape = (batch_size, filters)\n deconv1 = Conv2DTranspose(filters=nfilter, kernel_size=(3, 1), strides=(2, 1), padding='same')(reshape)\n bn3 = BatchNormalization()(deconv1)\n relu3 = Activation(activation='relu')(bn3)\n deconv2 = Conv2DTranspose(filters=out_size, kernel_size=(3, 1), strides=(2, 1), padding='same')(relu3)\n bn4 = BatchNormalization()(deconv2)\n relu4 = Activation(activation='relu')(bn4)\n reshape = Reshape((sample_out_size, out_size))(relu4)\n softmax = Dense(nclasses, activation='softmax')(reshape)\n\n def argmax_fun(softmax_output):\n return K.argmax(softmax_output, axis=2)\n\n def vae_loss(args):\n x_truth, x_decoded_final = args\n x_truth_flatten = K.flatten(x_truth)\n x_decoded_flat = K.reshape(x_decoded_final, shape=(-1, K.shape(x_decoded_final)[-1]))\n cross_ent = T.nnet.categorical_crossentropy(x_decoded_flat, x_truth_flatten)\n cross_ent = K.reshape(cross_ent, shape=(-1, K.shape(x_truth)[1]))\n sum_over_sentences = K.sum(cross_ent, axis=1)\n return sum_over_sentences\n\n def vae_kld_loss(args):\n mu, log_sigma = args\n\n kl_loss = - 0.5 * K.sum(1 + log_sigma - K.square(mu) - K.exp(log_sigma), axis=-1)\n kld_weight = K.clip((step - anneal_start) / (anneal_end - anneal_start), 0, 1 - eps) + eps\n return kl_loss*kld_weight\n\n def identity_loss(y_true, y_pred):\n return y_pred\n\n loss = Lambda(vae_loss, output_shape=(1,))([output_idx, softmax])\n kld_loss = Lambda(vae_kld_loss, output_shape=(1,), name='kld_loss')([hidden_mean, hidden_log_sigma])\n\n argmax = Lambda(argmax_fun, output_shape=(sample_out_size,))(softmax)\n\n train_model = Model(inputs=[input_idx, output_idx], outputs=[loss, kld_loss])\n\n test_model = Model(inputs=[input_idx], outputs=[argmax])\n\n return 
train_model, test_model\n","sub_path":"vae_architectures/vae_deconv_baseline_nlg.py","file_name":"vae_deconv_baseline_nlg.py","file_ext":"py","file_size_in_byte":4733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"479184625","text":"###################################\n# leetcode [8] String to Integer (atoi)\n###################################\nclass Solution:\n def myAtoi(self, s: str) -> int:\n ls = list(s.strip()) # strip whitespace on both sides\n if len(ls) == 0:\n return 0\n sign = -1 if ls[0] == '-' else 1 # determine the sign\n if ls[0] in ['-','+']: del ls[0]\n ret, i = 0, 0\n while i < len(ls) and ls[i].isdigit():\n # Subtracting ASCII codes turns a digit character into its numeric value; ret*10 shifts the accumulated number one decimal place before the new digit is added\n ret = ret * 10 + ord(ls[i]) - ord('0')\n i += 1\n return max(-2**31, min(sign * ret, 2**31 - 1)) # clamp to the 32-bit integer range\n\nif __name__ == \"__main__\":\n str = \" -91283472332\"\n s = Solution()\n print(s.myAtoi(str))\n","sub_path":"Week_09/myAtoi.py","file_name":"myAtoi.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"632700444","text":"from random import randrange\n\ndef shuffle(arr):\n for i in reversed(range(1,len(arr))):\n j = randrange(i+1)\n arr[i],arr[j] = arr[j], arr[i]\n print(arr)\n\n\nif __name__ == \"__main__\":\n A = [1, 2, 3, 4, 5, 6]\n shuffle(A)","sub_path":"shuffle.py","file_name":"shuffle.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"135549833","text":"\nimport unittest2 as unittest\nfrom mox import MoxTestBase, IsA\nfrom gevent.socket import socket\n\nfrom slimta.smtp.io import IO\nfrom slimta.smtp.auth import Auth, AuthSession, CredentialsInvalidError, \\\n ServerAuthError, InvalidMechanismError, \\\n AuthenticationCanceled\nfrom slimta.smtp.auth.standard import *\nfrom slimta.smtp.auth.oauth import OAuth2\n\n\nclass StaticCramMd5(CramMd5):\n\n def _build_initial_challenge(self):\n return ''\n\n\nclass FakeAuth(Auth):\n\n def verify_secret(self, cid, secret, zid=None):\n if cid != 'testuser' or secret != 'testpassword':\n raise CredentialsInvalidError()\n if zid is not None and zid != 'testzid':\n raise CredentialsInvalidError()\n return 'testidentity'\n\n def get_secret(self, cid, zid=None):\n if cid != 'testuser':\n raise CredentialsInvalidError()\n if zid is not None and zid != 'testzid':\n raise CredentialsInvalidError()\n return 'testpassword', 'testidentity'\n\n def get_available_mechanisms(self, encrypted):\n return [Plain, Login, StaticCramMd5]\n\n\nclass FakeAuthNoSecure(Auth):\n\n def get_available_mechanisms(self, encrypted):\n if encrypted:\n return [Plain, Login]\n else:\n return []\n\n\nclass FakeAuthWithGetSecret(Auth):\n\n def get_secret(self, *args):\n raise CredentialsInvalidError()\n\n\nclass FakeSession(object):\n\n def __init__(self, encrypted):\n self.encrypted = encrypted\n\n\nclass TestSmtpAuth(unittest.TestCase, MoxTestBase):\n\n def setUp(self):\n super(TestSmtpAuth, self).setUp()\n self.sock = self.mox.CreateMock(socket)\n self.sock.fileno = lambda: -1\n\n def test_get_available_mechanisms(self):\n auth = Auth()\n self.assertEqual([CramMd5], auth.get_available_mechanisms())\n self.assertEqual([CramMd5, Plain, Login],\n auth.get_available_mechanisms(True))\n\n def test_str(self):\n auth = AuthSession(FakeAuthWithGetSecret(), FakeSession(False))\n self.assertEqual('CRAM-MD5', str(auth))\n auth = AuthSession(Auth(), FakeSession(True))\n self.assertEqual('PLAIN LOGIN', 
str(auth))\n\n def test_str_no_secure_mechanisms(self):\n auth = AuthSession(FakeAuthNoSecure(), FakeSession(True))\n self.assertEqual('PLAIN LOGIN', str(auth))\n auth = AuthSession(FakeAuthNoSecure(), FakeSession(False))\n with self.assertRaises(ValueError):\n str(auth)\n\n def test_unimplemented_means_invalid(self):\n auth = FakeAuthWithGetSecret()\n with self.assertRaises(CredentialsInvalidError):\n auth.verify_secret('user', 'pass')\n with self.assertRaises(CredentialsInvalidError):\n auth.get_secret('user')\n\n def test_invalid_mechanism(self):\n auth = AuthSession(FakeAuth(), FakeSession(True))\n with self.assertRaises(InvalidMechanismError):\n auth.server_attempt(None, 'TEST')\n with self.assertRaises(InvalidMechanismError):\n auth.server_attempt(None, 'B@D')\n\n def test_plain_noarg(self):\n self.sock.sendall('334 \\r\\n')\n self.sock.recv(IsA(int)).AndReturn('dGVzdHppZAB0ZXN0dXNlcgB0ZXN0cGFzc3dvcmQ=\\r\\n')\n self.mox.ReplayAll()\n io = IO(self.sock)\n auth = AuthSession(FakeAuth(), FakeSession(True))\n identity = auth.server_attempt(io, 'PLAIN')\n self.assertEqual('testidentity', identity)\n\n def test_plain(self):\n self.mox.ReplayAll()\n io = IO(self.sock)\n auth = AuthSession(FakeAuth(), FakeSession(True))\n identity = auth.server_attempt(io, 'PLAIN dGVzdHppZAB0ZXN0dXNlcgB0ZXN0cGFzc3dvcmQ=')\n self.assertEqual('testidentity', identity)\n\n def test_plain_badcreds(self):\n self.mox.ReplayAll()\n io = IO(self.sock)\n auth = AuthSession(FakeAuth(), FakeSession(True))\n with self.assertRaises(CredentialsInvalidError):\n auth.server_attempt(io, 'PLAIN dGVzdHppZAB0ZXN0dXNlcgBiYWRwYXNzd29yZA==')\n with self.assertRaises(ServerAuthError):\n auth.server_attempt(io, 'PLAIN dGVzdGluZw==')\n\n def test_plain_canceled(self):\n self.sock.sendall('334 \\r\\n')\n self.sock.recv(IsA(int)).AndReturn('*\\r\\n')\n self.mox.ReplayAll()\n io = IO(self.sock)\n auth = AuthSession(FakeAuth(), FakeSession(True))\n with self.assertRaises(AuthenticationCanceled):\n auth.server_attempt(io, 'PLAIN')\n with self.assertRaises(AuthenticationCanceled):\n auth.server_attempt(io, 'PLAIN *')\n\n def test_login_noarg(self):\n self.sock.sendall('334 VXNlcm5hbWU6\\r\\n')\n self.sock.recv(IsA(int)).AndReturn('dGVzdHVzZXI=\\r\\n')\n self.sock.sendall('334 UGFzc3dvcmQ6\\r\\n')\n self.sock.recv(IsA(int)).AndReturn('dGVzdHBhc3N3b3Jk\\r\\n')\n self.mox.ReplayAll()\n io = IO(self.sock)\n auth = AuthSession(FakeAuth(), FakeSession(True))\n identity = auth.server_attempt(io, 'LOGIN')\n self.assertEqual('testidentity', identity)\n\n def test_login(self):\n self.sock.sendall('334 UGFzc3dvcmQ6\\r\\n')\n self.sock.recv(IsA(int)).AndReturn('dGVzdHBhc3N3b3Jk\\r\\n')\n self.mox.ReplayAll()\n io = IO(self.sock)\n auth = AuthSession(FakeAuth(), FakeSession(True))\n identity = auth.server_attempt(io, 'LOGIN dGVzdHVzZXI=')\n self.assertEqual('testidentity', identity)\n\n def test_crammd5(self):\n self.sock.sendall('334 PHRlc3RAZXhhbXBsZS5jb20+\\r\\n')\n self.sock.recv(IsA(int)).AndReturn('dGVzdHVzZXIgNDkzMzA1OGU2ZjgyOTRkZTE0NDJkMTYxOTI3ZGI5NDQ=\\r\\n')\n self.mox.ReplayAll()\n io = IO(self.sock)\n auth = AuthSession(FakeAuth(), FakeSession(True))\n identity = auth.server_attempt(io, 'CRAM-MD5 dGVzdHVzZXI=')\n self.assertEqual('testidentity', identity)\n\n def test_crammd5_badcreds(self):\n self.sock.sendall('334 PHRlc3RAZXhhbXBsZS5jb20+\\r\\n')\n self.sock.recv(IsA(int)).AndReturn('dGVzdHVzZXIgMTIzNDU2Nzg5MA==\\r\\n')\n self.mox.ReplayAll()\n io = IO(self.sock)\n auth = AuthSession(FakeAuth(), FakeSession(True))\n with 
self.assertRaises(CredentialsInvalidError):\n auth.server_attempt(io, 'CRAM-MD5 dGVzdHVzZXI=')\n\n def test_crammd5_malformed(self):\n self.sock.sendall('334 PHRlc3RAZXhhbXBsZS5jb20+\\r\\n')\n self.sock.recv(IsA(int)).AndReturn('bWFsZm9ybWVk\\r\\n')\n self.mox.ReplayAll()\n io = IO(self.sock)\n auth = AuthSession(FakeAuth(), FakeSession(True))\n with self.assertRaises(ServerAuthError):\n auth.server_attempt(io, 'CRAM-MD5 dGVzdHVzZXI=')\n\n def test_client_plain(self):\n self.sock.sendall('AUTH PLAIN amtsAHRlc3RAZXhhbXBsZS5jb20AYXNkZg==\\r\\n')\n self.sock.recv(IsA(int)).AndReturn('235 Ok\\r\\n')\n self.mox.ReplayAll()\n io = IO(self.sock)\n reply = Plain.client_attempt(io, 'test@example.com', 'asdf', 'jkl')\n self.assertEqual('235', reply.code)\n self.assertEqual('2.0.0 Ok', reply.message)\n\n def test_client_login(self):\n self.sock.sendall('AUTH LOGIN\\r\\n')\n self.sock.recv(IsA(int)).AndReturn('334 VXNlcm5hbWU6\\r\\n')\n self.sock.sendall('dGVzdEBleGFtcGxlLmNvbQ==\\r\\n')\n self.sock.recv(IsA(int)).AndReturn('334 UGFzc3dvcmQ6\\r\\n')\n self.sock.sendall('YXNkZg==\\r\\n')\n self.sock.recv(IsA(int)).AndReturn('235 Ok\\r\\n')\n self.mox.ReplayAll()\n io = IO(self.sock)\n reply = Login.client_attempt(io, 'test@example.com', 'asdf', None)\n self.assertEqual('235', reply.code)\n self.assertEqual('2.0.0 Ok', reply.message)\n\n def test_client_login_bad_mech(self):\n self.sock.sendall('AUTH LOGIN\\r\\n')\n self.sock.recv(IsA(int)).AndReturn('535 Nope!\\r\\n')\n self.mox.ReplayAll()\n io = IO(self.sock)\n reply = Login.client_attempt(io, 'test@example.com', 'asdf', None)\n self.assertEqual('535', reply.code)\n self.assertEqual('5.0.0 Nope!', reply.message)\n\n def test_client_login_bad_username(self):\n self.sock.sendall('AUTH LOGIN\\r\\n')\n self.sock.recv(IsA(int)).AndReturn('334 VXNlcm5hbWU6\\r\\n')\n self.sock.sendall('dGVzdEBleGFtcGxlLmNvbQ==\\r\\n')\n self.sock.recv(IsA(int)).AndReturn('535 Nope!\\r\\n')\n self.mox.ReplayAll()\n io = IO(self.sock)\n reply = Login.client_attempt(io, 'test@example.com', 'asdf', None)\n self.assertEqual('535', reply.code)\n self.assertEqual('5.0.0 Nope!', reply.message)\n\n def test_client_crammd5(self):\n self.sock.sendall('AUTH CRAM-MD5\\r\\n')\n self.sock.recv(IsA(int)).AndReturn('334 dGVzdCBjaGFsbGVuZ2U=\\r\\n')\n self.sock.sendall('dGVzdEBleGFtcGxlLmNvbSA1Yzk1OTBjZGE3ZTgxMDY5Mzk2ZjhiYjlkMzU1MzE1Yg==\\r\\n')\n self.sock.recv(IsA(int)).AndReturn('235 Ok\\r\\n')\n self.mox.ReplayAll()\n io = IO(self.sock)\n reply = CramMd5.client_attempt(io, 'test@example.com', 'asdf', None)\n self.assertEqual('235', reply.code)\n self.assertEqual('2.0.0 Ok', reply.message)\n\n def test_client_crammd5_bad_mech(self):\n self.sock.sendall('AUTH CRAM-MD5\\r\\n')\n self.sock.recv(IsA(int)).AndReturn('535 Nope!\\r\\n')\n self.mox.ReplayAll()\n io = IO(self.sock)\n reply = CramMd5.client_attempt(io, 'test@example.com', 'asdf', None)\n self.assertEqual('535', reply.code)\n self.assertEqual('5.0.0 Nope!', reply.message)\n\n def test_client_xoauth2(self):\n self.sock.sendall('AUTH XOAUTH2 dXNlcj10ZXN0QGV4YW1wbGUuY29tAWF1dGg9QmVhcmVyYXNkZgEB\\r\\n')\n self.sock.recv(IsA(int)).AndReturn('235 Ok\\r\\n')\n self.mox.ReplayAll()\n io = IO(self.sock)\n reply = OAuth2.client_attempt(io, 'test@example.com', 'asdf', None)\n self.assertEqual('235', reply.code)\n self.assertEqual('2.0.0 Ok', reply.message)\n\n def test_client_xoauth2_error(self):\n self.sock.sendall('AUTH XOAUTH2 dXNlcj10ZXN0QGV4YW1wbGUuY29tAWF1dGg9QmVhcmVyYXNkZgEB\\r\\n')\n self.sock.recv(IsA(int)).AndReturn('334 
eyJzdGF0dXMiOiI0MDEiLCJzY2hlbWVzIjoiYmVhcmVyIG1hYyIsInNjb3BlIjoiaHR0cHM6Ly9tYWlsLmdvb2dsZS5jb20vIn0K\r\n')\n self.sock.sendall('\r\n')\n self.sock.recv(IsA(int)).AndReturn('535 Nope!\r\n')\n self.mox.ReplayAll()\n io = IO(self.sock)\n reply = OAuth2.client_attempt(io, 'test@example.com', 'asdf', None)\n self.assertEqual('535', reply.code)\n self.assertEqual('5.0.0 Nope!', reply.message)\n\n\n# vim:et:fdm=marker:sts=4:sw=4:ts=4\n","sub_path":"test/test_slimta_smtp_auth.py","file_name":"test_slimta_smtp_auth.py","file_ext":"py","file_size_in_byte":10615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"176348858","text":"from flask import Flask, request, jsonify\r\nfrom flask_cors import CORS\r\nfrom pc_pp import get_part_info, get_average_price\r\n\r\n\r\napp = Flask(__name__)\r\nCORS(app)\r\n\r\n@app.route('/')\r\ndef index():\r\n return \"welcome\"\r\n\r\n@app.route('/price', methods=['POST'])\r\ndef price():\r\n try:\r\n # return request\r\n code = request.get_json(force=True)[\"part_no\"]\r\n return str(get_average_price(code))\r\n except Exception as ex:\r\n return \"0\"\r\n\r\n
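# --- Added usage sketch (illustrative; 'ABC123' is a made-up part number and\r\n# the host assumes Flask's default development server):\r\n# curl -X POST http://127.0.0.1:5000/price -d '{\"part_no\": \"ABC123\"}'\r\n# returns the average price as plain text, or \"0\" on any error.\r\n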
\r\nif __name__ == '__main__':\r\n app.debug = True\r\n app.run()\r\n","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"63320664","text":"import os\n\nfrom wellcomeml.ml.clustering import TextClustering\nfrom wellcomeml.viz.visualize_clusters import visualize_clusters\n\n\ndef test_output_html(tmp_path):\n \"\"\"Tests that the output html is generated correctly by the clustering function\"\"\"\n\n # This will be the file the visualisation is written to\n temporary_file = os.path.join(tmp_path, 'test-cluster.html')\n\n # Run clustering on small dummy data (see test_clustering.py)\n cluster = TextClustering(embedding_random_state=42,\n reducer_random_state=43,\n clustering_random_state=44)\n\n X = ['Wellcome Trust',\n 'The Wellcome Trust',\n 'Sir Henry Wellcome',\n 'Francis Crick',\n 'Crick Institute',\n 'Francis Harry Crick']\n\n cluster.fit(X)\n\n # Run the visualisation function with output_file=temporary_file\n visualize_clusters(clustering=cluster, output_file_path=temporary_file, radius=0.01,\n alpha=0.5, output_in_notebook=False)\n\n # Assert that the html was generated correctly\n assert os.path.exists(temporary_file)\n","sub_path":"tests/test_clustering_visualisation.py","file_name":"test_clustering_visualisation.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"242924163","text":"from abaqus import *\nfrom abaqusConstants import *\nfrom caeModules import *\nfrom driverUtils import executeOnCaeStartup\nimport os.path as path\nimport numpy as np\nexecuteOnCaeStartup()\n\no3 = session.openOdb(name='model.odb')\ntotal_frames = len(o3.steps['Step-1'].frames)\nsession.viewports['Viewport: 1'].setValues(displayedObject=o3)\nodb = session.odbs['model.odb']\nsession.fieldReportOptions.setValues(reportFormat=COMMA_SEPARATED_VALUES)\nsession.writeFieldReport(fileName='BubbleTest/L00000.csv', append=OFF, \n sortItem='Node Label', odb=odb, step=0, frame=total_frames - 1, outputPosition=NODAL, \n variable=(('U', NODAL, ((COMPONENT, 'U1'), (COMPONENT, 'U2'), (COMPONENT, 'U3'), )), ))\n\n#===============================================================================================\n# Generate the reports for the further increments in load at each time step = 1\n#===============================================================================================\n\nsaved_strings = list()\nload = np.linspace(0.001, 3.0, 200)\nload = load * 0.0001\nfor i in range(1, len(load)):\n \n i_string = str(i)\n step_string_i = 'Step-' + i_string\n i_plus_1_string = str(i + 1)\n step_string_i_plus_1 = 'Step-' + i_plus_1_string\n saved_strings.append([step_string_i, step_string_i_plus_1])\n\nfor i in range(0, len(saved_strings)):\n step_string_i = saved_strings[i][1]\n folderPath_string = 'BubbleTest/'\n # left-pad the frame index to five digits (e.g. 'L00001.csv')\n digits = len(str(abs(i+1))) \n L_string = 'L'\n for j in range(1, 6-digits): L_string += '0'\n L_string += str(i+1)\n L_string += '.csv'\n filePath_string = path.join(folderPath_string, L_string)\n total_frames = len(o3.steps[step_string_i].frames)\n session.writeFieldReport(fileName=filePath_string, append=OFF, \n sortItem='Node Label', odb=odb, step=i+1, frame=total_frames - 1, outputPosition=NODAL,\n variable=(('U', NODAL, ((COMPONENT, 'U1'), (COMPONENT, 'U2'), (COMPONENT, 'U3'), )), ))","sub_path":"iso_one_param/model/export_csv_files.py","file_name":"export_csv_files.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"416706861","text":"import tensorflow as tf\nfrom math import sqrt\n\nfrom util import *\n\n\ndef get_num_classes():\n # The number of different cervix classifications\n return 3\n\n\ndef get_dimensions(grayscale):\n # The dimensions of the images on disk\n IMAGE_H = 64\n IMAGE_W = 48\n IMAGE_C = 1 if grayscale else 3\n\n return IMAGE_H, IMAGE_W, IMAGE_C\n\n\ndef create_input_pipeline(records, batch_size, grayscale, augment):\n record_queue = tf.train.string_input_producer(records)\n reader = tf.TFRecordReader()\n record_name, serialized_example = reader.read(record_queue)\n\n features = tf.parse_single_example(\n serialized_example,\n features={\n 'image/height': tf.FixedLenFeature([], tf.int64),\n 'image/width': tf.FixedLenFeature([], tf.int64),\n 'image/colorspace': tf.FixedLenFeature([], tf.string),\n 'image/channels': tf.FixedLenFeature([], tf.int64),\n 'image/class/label': tf.FixedLenFeature([], tf.int64),\n 'image/class/text': tf.FixedLenFeature([], tf.string),\n 'image/format': tf.FixedLenFeature([], tf.string),\n 'image/filename': tf.FixedLenFeature([], tf.string),\n 'image/encoded': tf.FixedLenFeature([], tf.string),\n })\n\n name = features['image/filename']\n\n # The labels start at 1, but must start at 0 for sparse_softmax_cross_entropy_with_logits\n label = features['image/class/label'] - 1\n\n # Decode image\n image = tf.image.decode_jpeg(features['image/encoded'])\n image = tf.reshape(image, get_dimensions(grayscale=False))\n\n # Batch images together\n name_batch, image_batch, label_batch = tf.train.batch([name, image, label], batch_size=batch_size, capacity=1, num_threads=6)\n\n # Standardize each image in the batch\n image_batch = tf.cast(image_batch, tf.float32)\n image_batch = tf.map_fn(lambda img: tf.image.per_image_standardization(img), image_batch)\n\n # Randomly augment data\n if augment:\n image_batch = random_augment(image_batch)\n\n if grayscale:\n image_batch = rgb2gray(image_batch)\n\n return name_batch, image_batch, label_batch\n\n\ndef build_model(image_batch, batch_size, grayscale, reuse=False):\n with tf.variable_scope('model', reuse=reuse):\n IMAGE_H, IMAGE_W, IMAGE_C = get_dimensions(grayscale)\n N_PIXELS = IMAGE_H * IMAGE_W * IMAGE_C\n\n N_CLASSES = get_num_classes()\n\n # Flatten the images\n 
image_batch = tf.reshape(image_batch, [batch_size, N_PIXELS])\n\n # Learnable parameters\n weights = tf.get_variable('weights', initializer=tf.truncated_normal([N_PIXELS, N_CLASSES], stddev=1.0/sqrt(float(N_PIXELS))))\n biases = tf.get_variable('biases', initializer=tf.zeros([batch_size, N_CLASSES]))\n\n # Outputs\n logits = tf.matmul(image_batch, weights) + biases\n probabilities = tf.nn.softmax(logits)\n\n return logits, probabilities\n\n\ndef optimize(label_batch, logits, learning_rate, decay_lr, l2_penalty):\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label_batch, logits=logits)\n loss = tf.reduce_mean(cross_entropy)\n loss_with_l2_penalty = loss + l2_loss(l2_penalty)\n\n if decay_lr:\n learning_rate, global_step = decaying_learning_rate(learning_rate)\n else:\n global_step = None\n\n train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss_with_l2_penalty, global_step=global_step)\n\n return loss, train_op\n","sub_path":"net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":3456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"380625177","text":"import numpy as np\nfrom pyqtgraph.Qt import QtCore, QtGui\nimport pyqtgraph.opengl as gl\nimport sys\nfrom opensimplex import OpenSimplex\nimport pyaudio\nimport struct\n\nclass Mesh:\n\tdef __init__(self):\n\t\tself.app = QtGui.QApplication(sys.argv)\n\t\tself.view = gl.GLViewWidget()\n\t\tself.noise = OpenSimplex()\n\t\tself.offSet = 0\n\n\t\tself.verts = []\n\t\tself.faces = []\n\n\t\t#32 by 32 vertices\n\t\tfor x in range(32):\n\t\t\tfor y in range(32):\n\t\t\t\tself.verts.append([x, y, self.noise.noise2d(x, y)])\n\t\tself.verts = np.array(self.verts)\n\n\t\t#implementing faces\n\t\tfor i in range(31):\n\t\t\tfor j in range(31):\n\t\t\t\tself.faces.append([i * 32 + j, i *32 + j + 1, i * 32 + j + 32])\n\t\t\t\tself.faces.append([ i *32 + j + 1, i * 32 + j + 32 + 1, i * 32 + j + 32])\n\t\tself.faces = np.array(self.faces)\n\n\t\t#implementing colors for the faces\n\t\tself.colors = np.random.rand(len(self.faces), 4)\n\t\tself.colors = np.array(self.colors)\n\n\t\t#create mesh\n\t\tself.mesh = gl.GLMeshItem(vertexes= self.verts, faces= self.faces, faceColors= self.colors)\n\n\t\t#audio \n\t\t#self.RATE = 44100\n\t\t#self.CHUNK = 1024\n\t\t#self.audioData = None\n\t\t#self.p = pyaudio.PyAudio()\n\t\t#self.stream = self.p.open(format=pyaudio.palnt16, channels= 1, rate=self.RATE, input=True, output=True, frames_per_buffer=self.CHUNK)\n\n\t\t#get mesh to show\n\t\tself.view.show()\n\t\tself.view.setWindowTitle(\"Mesh\")\n\t\tself.mesh.setGLOptions(\"additive\")\n\t\tself.view.addItem(self.mesh)\n\n\tdef update(self):\n\t\t#update to audio\n\t\t#self.audioData = self.stream.read(self.CHUNK, exception_on_overflow=False)\n\t\t#struct.unpack()\n\n\n\n\t\tverts = []\n\t\tfaces = []\n\n\t\t#32 by 32 vertices\n\t\tfor x in range(32):\n\t\t\tfor y in range(32):\n\t\t\t\tverts.append([x, y, self.noise.noise2d(x + self.offSet, y + self.offSet)])\n\t\tverts = np.array(verts)\n\n\t\t#implementing faces\n\t\tfor i in range(31):\n\t\t\tfor j in range(31):\n\t\t\t\tfaces.append([i * 32 + j, i *32 + j + 1, i * 32 + j + 32])\n\t\t\t\tfaces.append([ i *32 + j + 1, i * 32 + j + 32 + 1, i * 32 + j + 32])\n\t\tfaces = np.array(faces)\n\n\t\t#implementing colors for faces\n\t\tcolors = np.random.rand(len(faces), 4)\n\t\tcolors = np.array(colors)\n\n\t\t#offset\n\t\tself.offSet -= .1\n\t\tself.mesh.setMeshData(vertexes= verts, faces= faces, faceColors= 
colors)\n\n\tdef run():\n\t\tif (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\n\t\t\tQtGui.QApplication.instance().exec_()\n\n\tdef animation(self):\n\t\ttimer = QtCore.QTimer()\n\t\ttimer.timeout.connect(self.update)\n\t\ttimer.start(75)\n\t\tMesh.run()\n\t\tself.update()\n\t\t\n\nif __name__ == '__main__':\n\tmesh = Mesh()\n\tmesh.animation()\n","sub_path":"Mesh2.py","file_name":"Mesh2.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"552269517","text":"#!/usr/bin/env python3\nfrom sys import argv\n\n\ndef chunks(l, n):\n '''Yields successive n-sized chunks from l'''\n for i in range(0, len(l), n):\n yield l[i:i+n]\n\nwith open(argv[1], 'rb') as f:\n for line in enumerate(chunks(f.read(), 16)):\n bs = ' '.join('{:02X}'.format(c) for c in line[1])\n print('{:08X} {}'.format(line[0], bs))\n","sub_path":"easy/117/117.py","file_name":"117.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"600116793","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/pyfmi/examples/fmi_bouncing_ball.py\n# Compiled at: 2018-12-15 16:31:41\nimport os as O, pylab as P, numpy as N\nfrom pyfmi import load_fmu\ncurr_dir = O.path.dirname(O.path.abspath(__file__))\npath_to_fmus = O.path.join(curr_dir, 'files', 'FMUs', 'ME1.0')\npath_to_fmus2 = O.path.join(curr_dir, 'files', 'FMUs', 'ME2.0')\n\ndef run_demo(with_plots=True, version='2.0'):\n \"\"\"\n Demonstrates how to use JModelica.org for simulation of \n ME FMUs version 1.0 and 2.0.\n \"\"\"\n if version == '1.0':\n fmu_name = O.path.join(path_to_fmus, 'bouncingBall.fmu')\n else:\n fmu_name = O.path.join(path_to_fmus2, 'bouncingBall.fmu')\n model = load_fmu(fmu_name)\n res = model.simulate(final_time=2.0)\n h_res = res['h']\n v_res = res['v']\n t = res['time']\n assert N.abs(res.final('h') - 0.0424044) < 0.0001\n if with_plots:\n fig = P.figure()\n P.clf()\n P.subplot(2, 1, 1)\n P.plot(t, h_res)\n P.ylabel('Height (m)')\n P.xlabel('Time (s)')\n P.subplot(2, 1, 2)\n P.plot(t, v_res)\n P.ylabel('Velocity (m/s)')\n P.xlabel('Time (s)')\n P.suptitle('FMI Bouncing Ball')\n P.show()\n\n\nif __name__ == '__main__':\n run_demo()","sub_path":"pycfiles/PyFMI-2.5-py2.7-linux-x86_64/fmi_bouncing_ball.py","file_name":"fmi_bouncing_ball.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"313444617","text":"import unittest # for TDD testing\nimport math # for the sqrt() function\n\nclass MathDojo:\n\tdef __init__(self):\n\t\tself.result = 0\n\tdef add(self, num, *nums):\n\t\tself.result += num\n\t\tfor n in nums:\n\t\t\tself.result += n \n\t\treturn self\n\tdef subtract(self, num, *nums):\n\t\tself.result -= num\n\t\tfor n in nums:\n\t\t\tself.result -= n \n\t\treturn self\n\n\t# Mean method, to streamline the standard deviation calculation\n\tdef prom(self, num, *nums):\n\t\tself.result += num\n\t\tcontador = 0\n\t\tfor n in nums:\n\t\t\tself.result += n\n\t\t\tcontador += 1\n\t\t#print(self.result, contador+1)\n\t\tself.result = self.result/(contador+1)\n\t\treturn self\n\n\tdef desvest(self, num, *nums):\n\t\tpromedio = self.prom(num,*nums).result # the function's value is not reached without appending .result\n\t\tself.result = 0 # self.result is reset, because the previous result is the \n\t\t\t\t\t\t# mean and operations would otherwise start from that number\n\t\tcontador = 0\n\t\tself.result += (num - promedio)**2 # Perform the first operation with the first number before entering *nums\n\t\tfor n in nums:\n\t\t\tself.result += (n - promedio)**2\n\t\t\tcontador += 1\n\t\tself.result = math.sqrt(self.result/(contador))\n\t\treturn self\n\n# create an instance:\nmd = MathDojo()\n# to test it:\nq = md.add(2).add(2,5,1).subtract(3,2).result\nprint(q)\t# should print 5\n\n# Write the add method and test it by calling it 3 times, with a different \n# number of arguments each time\n# 1\nmet1=MathDojo()\nx = met1.add(-2,-4,7,-1).result\nprint(\"x =\",x)\n# 2\nmet2=MathDojo()\ny = met1.add(1,2,3,4,5,6,7,8,9,10).add(-10,-9,-8,-7,-6,-5).result\nprint(\"y =\",y)\n# 3\nmet3=MathDojo()\nz = met3.add(3,4,2,6).add(3,2).result\nprint(\"z =\",z)\n\n# Write the subtract method and test it by calling it 3 times, with a different \n# number of arguments each time\n# 1\nmet4=MathDojo()\nx1 = met4.subtract(-10,2,2,1).result # 0 - (-10) - 2 - 2 - 1 = 5 \nprint(\"x1 =\",x1)\n# 2\nmet5=MathDojo()\ny1 = met5.subtract(1,2,3,4,5,6,7,8,9,10).subtract(-10,-9,-8,-7,-6,-5).result\nprint(\"y1 =\",y1)\n# 3\nmet6=MathDojo()\nz1 = met6.subtract(-100).subtract(30,20).subtract(-30,-20).result\nprint(\"z1 =\",z1)\n# Three mean (prom) tests are added\nmet7 = MathDojo()\nx2 = met7.prom(2,4,8,10).result \nprint(\"x2 (promedio) =\",x2)\nmet8 = MathDojo()\ny2 = met8.prom(3,6,9,12).result \nprint(\"y2 (promedio) =\",y2)\nmet9 = MathDojo()\nz2 = met9.prom(12,6,7,15,3,10,18,5).result \nprint(\"z2 (promedio) =\",z2)\n# Then compute the standard deviation of each one\nmet10= MathDojo()\nx3 = met10.desvest(12,6,7,15,3,10,18,5).result \nprint(\"x3 (desvest) =\",x3)\nmet11= MathDojo()\ny3 = met11.desvest(2,4,8,10).result \nprint(\"y3 (desvest) =\",y3)\nmet12 = MathDojo()\nz3 = met12.desvest(3,6,9,12).result #12,6,7,15,3,10,18,5\nprint(\"z3 (desvest) =\",z3)\n\n
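# --- Added worked check (illustration only): desvest(-4, 5, 6, 9) averages to\n# 4, the squared deviations are 64 + 1 + 4 + 25 = 94, and the sample standard\n# deviation is sqrt(94 / 3) = 5.597618541248888, matching the assertion below.\n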
class Math_Test(unittest.TestCase):\n\tdef setUp(self):\n\t\tself.md = MathDojo()\n\tdef testadd(self):\n\t\tself.assertEqual(self.md.add(2,4,6,8,10).result, 30)\n\tdef testsubtract(self):\n\t\tself.assertEqual(self.md.subtract(2,4,6,8,10).result, -30)\n\tdef testprom (self):\n\t\tself.assertEqual(self.md.prom(1,2,3,4,5,6).result, 3.5)\n\tdef testdesvest (self):\n\t\tself.assertEqual(self.md.desvest(-4,5,6,9).result, 5.597618541248888)\nif __name__ == '__main__':\n unittest.main()","sub_path":"mat_dojo_TDD.py","file_name":"mat_dojo_TDD.py","file_ext":"py","file_size_in_byte":3169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"87453714","text":"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2021, 2023.\n#\n# This code is licensed under the Apache License, Version 2.0. 
You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Tests Electronic Structure Problem.\"\"\"\nimport unittest\nimport warnings\nfrom test import QiskitNatureTestCase\n\nimport json\nimport numpy as np\n\nfrom qiskit.algorithms.minimum_eigensolvers import MinimumEigensolverResult\nfrom qiskit.opflow import PauliSumOp\nfrom qiskit.opflow.primitive_ops import Z2Symmetries\nfrom qiskit.quantum_info.analysis.z2_symmetries import Z2Symmetries as Z2SparseSymmetries\n\nimport qiskit_nature.optionals as _optionals\nfrom qiskit_nature.second_q.drivers import PySCFDriver\nfrom qiskit_nature.second_q.hamiltonians import ElectronicEnergy\nfrom qiskit_nature.second_q.mappers import JordanWignerMapper\nfrom qiskit_nature.second_q.operators import SparseLabelOp\nfrom qiskit_nature.second_q.problems import ElectronicStructureProblem\nfrom qiskit_nature.second_q.properties import AngularMomentum, Magnetization, ParticleNumber\nfrom qiskit_nature.second_q.transformers import ActiveSpaceTransformer\n\n\nclass TestElectronicStructureProblem(QiskitNatureTestCase):\n \"\"\"Tests Electronic Structure Problem.\"\"\"\n\n def test_interpret(self):\n \"\"\"Tests the result interpretation method.\"\"\"\n dummy_result = MinimumEigensolverResult()\n dummy_result.eigenvalue = 1.0\n dummy_result.aux_operators_evaluated = {\n \"ParticleNumber\": (1.0, 0.0),\n \"AngularMomentum\": (2.0, 0.0),\n \"Magnetization\": (-1.0, 0.0),\n }\n\n dummy_problem = ElectronicStructureProblem(\n ElectronicEnergy.from_raw_integrals(np.zeros((2, 2)), np.zeros((2, 2, 2, 2)))\n )\n dummy_problem.hamiltonian.nuclear_repulsion_energy = 1.23\n dummy_problem.reference_energy = -4.56\n dummy_problem.properties.angular_momentum = AngularMomentum(1)\n dummy_problem.properties.magnetization = Magnetization(1)\n dummy_problem.properties.particle_number = ParticleNumber(1)\n\n elec_struc_res = dummy_problem.interpret(dummy_result)\n\n with self.subTest(\"hartree fock energy\"):\n self.assertAlmostEqual(elec_struc_res.hartree_fock_energy, -4.56)\n with self.subTest(\"nuclear repulsion energy\"):\n self.assertAlmostEqual(elec_struc_res.nuclear_repulsion_energy, 1.23)\n with self.subTest(\"computed energy\"):\n self.assertEqual(len(elec_struc_res.computed_energies), 1)\n self.assertAlmostEqual(elec_struc_res.computed_energies[0], 1.0)\n with self.subTest(\"number of particles\"):\n self.assertAlmostEqual(elec_struc_res.num_particles[0], 1.0)\n with self.subTest(\"angular momentum\"):\n self.assertAlmostEqual(elec_struc_res.total_angular_momentum[0], 2.0)\n with self.subTest(\"spin\"):\n self.assertAlmostEqual(elec_struc_res.spin[0], 1.0)\n with self.subTest(\"magnetization\"):\n self.assertAlmostEqual(elec_struc_res.magnetization[0], -1.0)\n\n @unittest.skipIf(not _optionals.HAS_PYSCF, \"pyscf not available.\")\n def test_second_q_ops_without_transformers(self):\n \"\"\"Tests that the list of second quantized operators is created if no transformers\n provided.\"\"\"\n expected_num_of_sec_quant_ops = 6\n with open(\n self.get_resource_path(\"H2_631g_ferm_op.json\", \"second_q/problems/resources\"),\n \"r\",\n encoding=\"utf8\",\n ) as file:\n expected = json.load(file)\n\n driver = PySCFDriver(basis=\"631g\")\n electronic_structure_problem = 
driver.run()\n\n electr_sec_quant_op, second_quantized_ops = electronic_structure_problem.second_q_ops()\n\n with self.subTest(\"Check expected length of the list of second quantized operators.\"):\n assert len(second_quantized_ops) == expected_num_of_sec_quant_ops\n with self.subTest(\"Check types in the list of second quantized operators.\"):\n for second_quantized_op in second_quantized_ops.values():\n assert isinstance(second_quantized_op, SparseLabelOp)\n with self.subTest(\"Check components of electronic second quantized operator.\"):\n assert all(\n s[0] == t[0] and np.isclose(np.abs(s[1]), np.abs(t[1]))\n for s, t in zip(sorted(expected.items()), sorted(electr_sec_quant_op.items()))\n )\n\n @unittest.skipIf(not _optionals.HAS_PYSCF, \"pyscf not available.\")\n def test_second_q_ops_with_active_space(self):\n \"\"\"Tests that the correct second quantized operator is created if an active space\n transformer is provided.\"\"\"\n expected_num_of_sec_quant_ops = 6\n with open(\n self.get_resource_path(\n \"H2_631g_ferm_op_active_space.json\", \"second_q/problems/resources\"\n ),\n \"r\",\n encoding=\"utf8\",\n ) as file:\n expected = json.load(file)\n\n driver = PySCFDriver(basis=\"631g\")\n trafo = ActiveSpaceTransformer(2, 2)\n\n electronic_structure_problem = trafo.transform(driver.run())\n electr_sec_quant_op, second_quantized_ops = electronic_structure_problem.second_q_ops()\n\n with self.subTest(\"Check expected length of the list of second quantized operators.\"):\n assert len(second_quantized_ops) == expected_num_of_sec_quant_ops\n with self.subTest(\"Check types in the list of second quantized operators.\"):\n for second_quantized_op in second_quantized_ops.values():\n assert isinstance(second_quantized_op, SparseLabelOp)\n with self.subTest(\"Check components of electronic second quantized operator.\"):\n assert all(\n s[0] == t[0] and np.isclose(np.abs(s[1]), np.abs(t[1]))\n for s, t in zip(sorted(expected.items()), sorted(electr_sec_quant_op.items()))\n )\n\n @unittest.skipIf(not _optionals.HAS_PYSCF, \"pyscf not available.\")\n def test_symmetry_sector_locator(self):\n \"\"\"Tests that the symmetry sector locator gives the right sector.\"\"\"\n driver = PySCFDriver()\n electronic_structure_problem = driver.run()\n hamiltonian, _ = electronic_structure_problem.second_q_ops()\n mapper = JordanWignerMapper()\n mapped_op = mapper.map(hamiltonian)\n expected_sector = [-1, 1, -1]\n\n with self.subTest(\"Opflow Z2Symmetries\"):\n if isinstance(mapped_op, PauliSumOp):\n mapped_op = mapped_op.primitive\n z2sym = Z2SparseSymmetries.find_z2_symmetries(mapped_op)\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=DeprecationWarning)\n sector = electronic_structure_problem.symmetry_sector_locator(z2sym, mapper)\n self.assertEqual(sector, expected_sector)\n with self.subTest(\"Opflow Z2Symmetries\"):\n if not isinstance(mapped_op, PauliSumOp):\n mapped_op = PauliSumOp(mapped_op)\n z2sym = Z2Symmetries.find_Z2_symmetries(mapped_op)\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=DeprecationWarning)\n sector = electronic_structure_problem.symmetry_sector_locator(z2sym, mapper)\n self.assertEqual(sector, expected_sector)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"test/second_q/problems/test_electronic_structure_problem.py","file_name":"test_electronic_structure_problem.py","file_ext":"py","file_size_in_byte":7698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} 
+{"seq_id":"509935036","text":"# This file was used to calculate the sublimation and evaporation mass fluxes\n\nimport numpy as np\nimport pandas as pd\nimport math as m\nimport datetime\nimport joblib\n\n\n# file names and path\n\nPKL_path = '/home/riqo/era5_files/'\n\nfile = 'era5_data_pickle'\n\nD = joblib.load(PKL_path + file)\n\nlsm = D['lsm']\n\nL_subl = lsm\n\nL_subl = np.where(L_subl==0, 2.26*10**6, L_subl)\nL_subl = np.where(L_subl==1, 2.83*10**6, L_subl)\n\nP_triple = 610.5 # Triple point pressure (Pa)\nT_triple = 273.16 # Triple point temperature (K)\nR_v = 461.9 # Water vapor gas constant (J/kgK)\nk = 0.4 # von Karman constant\nrho_air = 1.225 # Density of air in kg/m3\n\nZ_U = 10 # Height of wind speed measurements (m)\nZ_T = 2 # Height of temperature measurements (m)\nZ_0 = 0.002 # Surface roughness (m)\ng = 9.81 # Accelration due to gravity (m2/s)\n\nD['LE'] = np.empty((len(D['time']), len(D['latitude']),len(D['longitude'])), dtype=np.float32)\n\nfor a in range (0,len(D['time'])):\n\n print(a)\n\n U_0 = (D['u10'][a]**2 + D['v10'][a]**2)**0.5\n\n U = np.where(U_0 > 0.1, U_0, 0.1)\n\n e_surface = P_triple * np.exp((L_subl * (D['skt'][a] - T_triple)) / (R_v * D['skt'][a] * T_triple))\n q_surface = 0.622 * e_surface / (D['sp'][a] - (0.378 * e_surface))\n\n e_air = np.exp(((1/D['t2m'][a])-(1/D['d2m'][a])) * (L_subl/R_v)) * e_surface\n q_air = 0.622 * e_air / (D['sp'][a] - (0.378 * e_air))\n\n for b in range (0, len(D['latitude'])):\n\n for c in range(0, len(D['longitude'])):\n\n LE_1 = 1\n LE_2 = 100\n\n psi = 0\n psi_hq = 0\n\n while abs(((LE_2 - LE_1) / LE_1)) * 100 > 5:\n\n Denom = np.log(Z_U/Z_0) - psi\n\n Denom_hq = np.log(Z_T / Z_0) - psi_hq\n\n LE_1 = rho_air * (k**2) * (U[b,c] * (q_air[b,c] - q_surface[b,c]) / (Denom * Denom_hq))\n\n SE = (k**2) * (U[b,c] * (D['t2m'][a][b,c] - D['skt'][a][b,c])/ (Denom * Denom_hq))\n\n u_star = k * U[b,c] / Denom\n\n OL = -u_star**3 * D['t2m'][a][b,c] / (k * g * SE)\n\n Z_L = Z_U / OL\n\n if Z_L > 10:\n\n Z_L = 10\n\n if Z_L < -10:\n\n Z_L = -10\n\n if Z_L < 0:\n\n x = (1 - 16 * Z_L) ** (1 / 4)\n psi = 2 * np.log((1 + x) / 2) + np.log((1 + x ** 2) / 2) - 2 * np.arctan(x) + np.pi / 2\n psi_hq = 2 * np.log((1 + x ** 2) / 2)\n\n elif Z_L > 0 and Z_L < 1:\n\n psi = -5 * Z_L\n psi_hq = psi\n\n else:\n\n psi = -5 * (np.log(Z_L) + 1)\n psi_hq = psi\n\n LE_2 = rho_air * k ** 2 * (U[b,c] * (q_air[b,c] - q_surface[b,c]) / (Denom * Denom_hq))\n\n D['LE'][a][b,c] = LE_2\n\nD['LE'] = D['LE'] * 3600\n\nD['mass_snowfall'] = D['sf'] * 997\n\nPKL_path_save = '/home/riqo/mass_flux_files/'\n\nfile_save = 'mass_flux_pickle'\n\nf = open(PKL_path_save + file_save, \"wb\")\n\njoblib.dump(D,f)\n\nf.close()\n\n\n\n\n\n\n\n","sub_path":"Model/II_mass_flux_calculation/III_mass_flux_calculation.py","file_name":"III_mass_flux_calculation.py","file_ext":"py","file_size_in_byte":3398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"578459148","text":"from __future__ import print_function, division\n\nimport os\nimport shutil\nimport time\n\nimport torch\nfrom tensorboard_logger import configure, log_value\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nfrom train_res3d.parser_args import parser_args\nfrom data_set.data_choose import data_choose\nfrom model.model_choose import model_choose\nfrom train_res3d import train_val_model\n\n\ndef optimizer_choose(model, args):\n params = []\n for key, value in model.named_parameters():\n if key[8:16] == 'conv_off':\n params += [{'params': [value], 'lr': 
 if args.optimizer == 'adam':\n optimizer = torch.optim.Adam(params)\n print('----Using Adam optimizer')\n else:\n optimizer = torch.optim.SGD(params, momentum=args.momentum)\n print('----Using SGD with momentum ', args.momentum)\n return optimizer\n","sub_path":"train_res3d/optimizer_choose.py","file_name":"optimizer_choose.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"637631459","text":"'''\nTest of a generative adversarial network (GAN)\nMNIST 28*28\nfully connected\n'''\n\nimport torch\nimport torch.utils.data\nimport torchvision\nimport os\nimport argparse\nfrom datetime import datetime\n\n\n# 1. Define the hyperparameters\nparser=argparse.ArgumentParser()\nparser.add_argument('--batch_size',type=int,default=200,help='Batch processing')\nparser.add_argument('--epoch',type=int,default=100,help='All data trained several times')\nparser.add_argument('--nz',type=int,default=100,help='100 dimension')\nparser.add_argument('--save_path',default='testS',help='The Path of Generating Data Set Preservation')\nopt=parser.parse_args()\n\n# 2. Use the GPU if one is available\ndevice=torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n# 3. Create the save directory\nif not os.path.exists(opt.save_path):\n os.mkdir(opt.save_path)\n\n# 4. Network definitions\nclass MyGanD(torch.nn.Module):\n '''Discriminator'''\n def __init__(self):\n super(MyGanD,self).__init__()\n self.fc=torch.nn.Sequential(\n torch.nn.Linear(28*28,16*16),\n torch.nn.LeakyReLU(0.2, True),\n torch.nn.Linear(16 * 16, 1),\n torch.nn.Sigmoid()\n )\n\n def forward(self, inputs):\n # print(inputs.size()) # [200,1,28,28]\n out=inputs.view(opt.batch_size,-1) # torch.Size([200, 784])\n out = self.fc(out)\n return out\n\nclass MyGanG(torch.nn.Module):\n '''Generator'''\n def __init__(self):\n super(MyGanG,self).__init__()\n self.fc=torch.nn.Sequential(\n torch.nn.Linear(100,16*16),\n torch.nn.ReLU(),\n torch.nn.Linear(16*16,28*28),\n torch.nn.Tanh()\n )\n def forward(self, inputs):\n out=self.fc(inputs)\n out = out.view(-1, 1, 28, 28)\n return out\n\n\n# 5. Load the trained models\nGanD=MyGanD().to(device)\nGanG=MyGanG().to(device)\n\nGanD.load_state_dict(torch.load('./moxingS/GanD_99.pth'))\nGanG.load_state_dict(torch.load('./moxingS/GanG_99.pth'))\n
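# --- Added note (assumption, not in the original): if these checkpoints were\n# saved from a GPU run, loading them on a CPU-only machine may additionally\n# need torch.load('./moxingS/GanD_99.pth', map_location=device).\n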
normalize=True)\n\n\n\n\n\n\n\n\n","sub_path":"PyTorch 1-N/PyTorch(6)/ganS_test.py","file_name":"ganS_test.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"438915834","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: CarlSouthall\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\nimport scipy.io.wavfile as wav\nimport numpy as np\nimport os\n\n\ndef MeanPP(Track,Lambda):\n \n m=np.mean(Track)*Lambda\n onsets=[]\n Track=np.append(Track,0)\n for i in xrange(len(Track)):\n if Track[i]>Track[i-1] and Track[i]>Track[i+1] and Track[i]> m:\n onsets=np.append(onsets,i)\n\n if len(onsets) > 0:\n onsets=(onsets*512)/float(44100) \n return onsets \n \ndef Wavread(TrackName):\n x=wav.read(TrackName)\n y=x[1]\n if len(y.shape)>1:\n y=np.squeeze(np.sum(y,axis=1)) \n y=y/float(np.max(abs(y)))\n \n return y\n\ndef arrange_output(Inputs,output_sort='time'):\n \n Names=['BD','SD','HH']\n \n Out=list(np.zeros(len(Inputs)))\n Out1=list(np.zeros(len(Inputs)))\n for i in xrange(len(Inputs)): \n \n Out[i]=list(np.zeros(len(Inputs[i])))\n Out1[i]=list(np.zeros((1,2)))\n for j in xrange(len(Inputs[i])): \n Out[i][j]=list(np.zeros((len(Inputs[i][j]))))\n for k in xrange(len(Inputs[i][j])):\n Out[i][j][k]=list(np.zeros(2))\n Out[i][j][k][0]=Inputs[i][j][k]\n Out[i][j][k][1]=Names[j]\n \n \n if len(Out[i][j])>1:\n Out1[i]=np.concatenate([Out1[i],Out[i][j]],axis=0)\n \n Out[i]=Out1[i][1:]\n \n if output_sort=='time':\n Out1[i]=np.array(Out[i][:,0],dtype=float)\n Out[i][:,0]=np.array(np.sort(Out1[i]),dtype=str)\n indexs=np.argsort(Out1[i]) \n out_names=list(Out[i][:,1])\n for j in xrange(len(indexs)): \n Out[i][j,1]=out_names[indexs[j]]\n \n \n return Out\n \ndef write_text(X,names,suffix='.ADT.txt',save_dir='current'):\n \n if save_dir != 'current':\n current_dir=os.getcwd()\n os.chdir(save_dir)\n \n for i in xrange(len(names)):\n file = open(names[i]+suffix, \"w\") \n for j in xrange(len(X[i])):\n X[i][j][0]=X[i][j][0][0:8]\n item=\" \".join(X[i][j])\n file.write(\"%s\\n\" % item)\n \n if save_dir != 'current':\n os.chdir(current_dir) \n \n \n \n ","sub_path":"ADTLib/utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"213245082","text":"import numpy as np\r\nimport util\r\nimport scipy\r\n\r\nimport scipy.sparse.csr\r\ndef kernel_linear_unary(X_train, X_test, lhp, no_noise):\r\n p = np.dot(X_train,X_test.T)\r\n if isinstance(p, scipy.sparse.csr.csr_matrix):\r\n p = p.toarray() # cos if using X_train sparse vector, p will be a csr_matrix -- incidentally in this case the resulting k_unary cannot be flattened, it will result in a (1,X) 2D matrix !\r\n k_unary = np.exp(lhp[\"unary\"]) * np.array(p, dtype=util.dtype_for_arrays)\r\n return noisify(k_unary, lhp, no_noise)\r\n \r\nimport sklearn.metrics.pairwise\r\ndef kernel_exponential_unary(X_train, X_test, lhp, no_noise):\r\n p = sklearn.metrics.pairwise.euclidean_distances(X_train, X_test, squared=True)\r\n # sames as scipy.spatial.distance.cdist(X_train,X_test, 'sqeuclidean')\r\n # but works with Scipy sparse and Numpy dense arrays\r\n # I thought it would be equal to X_train.dot(X_train.T) + X_test.dot(X_test.T) - X_train.dot(X_test.T) - X_test.dot(X_train.T))\r\n # but it doesnt seem to\r\n k_unary = util.dtype_for_arrays(np.exp(lhp[\"unary\"]) * np.exp( -(1/2) * 1/(np.exp(lhp[\"length_scale\"])**2 ) * 
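A small usage sketch of the mean-threshold peak picking implemented in MeanPP above, ported to Python 3 range (the record uses Python 2 xrange); the toy activation curve and Lambda value are made up for illustration.

import numpy as np

def mean_pp(track, lam):
    # Keep local maxima that exceed lam * mean(track), as MeanPP does above
    m = np.mean(track) * lam
    track = np.append(track, 0)  # pad so the last real frame has a right neighbour
    onsets = [i for i in range(len(track) - 1)
              if track[i] > track[i - 1] and track[i] > track[i + 1] and track[i] > m]
    # Convert frame indices to seconds (hop size 512 at 44100 Hz, as above)
    return np.array(onsets) * 512 / 44100.0

activation = np.array([0.0, 0.1, 0.9, 0.2, 0.05, 0.8, 0.1])  # toy onset curve
print(mean_pp(activation, 1.0))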
p))\r\n return noisify(k_unary, lhp, no_noise)\r\n \r\nimport scipy.spatial.distance\r\ndef kernel_exponential_ard(X_train, X_test, lhp, no_noise):\r\n p = scipy.spatial.distance.cdist(X_train, X_test, metric='mahalanobis', VI=np.diag(1/np.exp(lhp['variances'])))\r\n k_unary = util.dtype_for_arrays(np.exp(lhp[\"unary\"]) * np.exp( (-1/2) * np.square(p)))\r\n return noisify(k_unary, lhp, no_noise)\r\n\r\ndef noisify(k_unary, lhp, no_noise):\r\n if no_noise:\r\n return k_unary\r\n else:\r\n return k_unary + (np.exp(lhp[\"noise\"])) * np.eye(k_unary.shape[0])\r\n \r\ndef compute_kernels_from_data(kernel, lhp, X_train, X_test, n_labels):\r\n k_unary = kernel(X_train, X_train, lhp, no_noise=False)\r\n# read_randoms(len(k_unary.flatten(order='F')), should=k_unary.flatten(order='F'), true_random_source=False) # DEBUG\r\n lower_chol_k_compact = gram_compact(np.linalg.cholesky(k_unary), np.sqrt(np.exp(lhp[\"binary\"])), n_labels)\r\n # NB no noise for prediction\r\n '''\r\n must compute S = K* ' K^-1, equivalent to S K = K*', ie K' S' = K*, ie K S' = K* (cos K sym)\r\n x=solve(A,b) returns x solution of Ax=b\r\n so solve(K, K*) gives S', hence need to transpose the result \r\n '''\r\n k_star_unary = kernel(X_train, X_test, lhp, no_noise=True)\r\n k_star_T_k_inv = gram_compact(np.linalg.solve(k_unary, k_star_unary).T, gram_binary_scalar=1, n_labels=n_labels)\r\n \r\n #read_randoms(should=k_star_T_k_inv_unary.ravel(order='F'), true_random_source=False) #DEBUG\r\n\r\n #===========================================================================\r\n #if (n_f_star > 0):\r\n # % cholcov( k** - k*' K^-1 k* )\r\n # lowerCholfStarCov = chol(exp(lhp(1)) * (X_test * X_test') ...\r\n # + noise_param * eye(TT_test) ... % jitter not needed in theory, but in practice needed for numerical stability of chol() operation\r\n # - k_star_T_k_inv_unary * kStar_unaryT')';\r\n #===========================================================================\r\n #del k_star_unary # not needed since we're in local scope anyway\r\n\r\n return (lower_chol_k_compact, k_star_T_k_inv)\r\n \r\nclass gram_compact():\r\n def __init__(self, gram_unary, gram_binary_scalar, n_labels):\r\n \"\"\"\r\n gram_unary could be eg kStarTKInv_unary\r\n gram_binary_scalar eg just binary hyperparameter; binary part of matrix assumed = gram_binary_scalar * eye(n_labels **2)\r\n \"\"\"\r\n self.gram_unary = gram_unary\r\n self.gram_binary_scalar = gram_binary_scalar\r\n self.n_labels = n_labels\r\n self.n_star = gram_unary.shape[0]\r\n self.n = gram_unary.shape[1]\r\n \r\n def T_solve(self, v):\r\n assert(v.shape[0] == (self.n_labels * self.n + self.n_labels ** 2))\r\n result = np.zeros((self.n_labels * self.n + self.n_labels ** 2), dtype=util.dtype_for_arrays)\r\n for label in range(self.n_labels):\r\n result[label * self.n : (label + 1) * self.n] = np.linalg.solve(self.gram_unary.T, v[label*self.n : (label+1)*self.n])\r\n result[self.n*self.n_labels:] = v[self.n*self.n_labels:] / self.gram_binary_scalar # binary section, should be length n_labels ** 2\r\n return result\r\n \r\n def dot_wrapper(self, v, A):\r\n assert(v.shape[0] == (self.n_labels * self.n + self.n_labels ** 2))\r\n result = np.zeros((self.n_labels * self.n_star + self.n_labels ** 2), dtype=util.dtype_for_arrays)\r\n for label in range(self.n_labels):\r\n result[label * self.n_star : (label + 1) * self.n_star] = np.dot(A, v[label*self.n : (label+1)*self.n])\r\n # maybe can rewrite this by properly shaping the values in f, and then doing a single dot()\r\n # but this is no performance 
bottleneck so leave it\r\n result[self.n_star*self.n_labels:] = v[self.n*self.n_labels:] * self.gram_binary_scalar # binary section, should be length n_labels ** 2\r\n return result\r\n \r\n def dot(self, v):\r\n return self.dot_wrapper(v, self.gram_unary)\r\n \r\n def T_dot(self, v):\r\n return self.dot_wrapper(v, self.gram_unary.T)\r\n \r\n def diag_log_sum(self):\r\n return np.log(np.diag(self.gram_unary)).sum() * self.n_labels + np.log(self.gram_binary_scalar) * self.n_labels ** 2\r\n\r\nif __name__ == \"__main__\":\r\n import scipy\r\n\r\n def expand_kernel(k_unary, k_binary, n_labels):\r\n l = [k_unary]*n_labels # [k_unary, k_unary, ... k_unary], with length n_label\r\n l.append(k_binary)\r\n return scipy.linalg.block_diag(*tuple(l))\r\n n_labels = 3\r\n n = 10\r\n n_star = 11\r\n k_unary = np.random.rand(n_star, n) # like kStarTKInv_unary\r\n k_binary_scalar = np.random.rand() # equivalent to lhp['binary']\r\n v = np.random.rand(n * n_labels + n_labels **2)\r\n \r\n np.testing.assert_array_equal(\r\n util.dtype_for_arrays(expand_kernel(k_unary, k_binary_scalar * np.eye(n_labels **2), n_labels).dot(v)),\r\n gram_compact(k_unary, k_binary_scalar, n_labels).dot(v))\r\n \r\n k_unary = np.random.rand(n, n) # now square\r\n np.testing.assert_array_equal(\r\n util.dtype_for_arrays(np.linalg.solve(expand_kernel(k_unary, k_binary_scalar * np.eye(n_labels **2), n_labels).T, v)),\r\n gram_compact(k_unary, k_binary_scalar, n_labels).T_solve(v))\r\n np.testing.assert_array_equal(\r\n util.dtype_for_arrays(expand_kernel(k_unary, k_binary_scalar * np.eye(n_labels **2), n_labels).T.dot(v)),\r\n gram_compact(k_unary, k_binary_scalar, n_labels).T_dot(v)) # test works because this k_unary is not symetric\r\n np.testing.assert_approx_equal(\r\n np.sum(np.log(np.diag(expand_kernel(k_unary, k_binary_scalar * np.eye(n_labels **2), n_labels)))),\r\n gram_compact(k_unary, k_binary_scalar, n_labels).diag_log_sum())\r\n\r\n # test ARD exponential kernel\r\n X_train = np.array([[0]])\r\n X_test = np.array([[3]])\r\n np.testing.assert_approx_equal(kernel_exponential_ard(X_train, X_test, {'unary' : np.log(1), 'variances' : np.log([2])}, no_noise=True),\r\n np.exp(-1/2 * (1/2) * 3**2))\r\n X_train = np.array([[0,2]])\r\n X_test = np.array([[3,-5]])\r\n np.testing.assert_approx_equal(kernel_exponential_ard(X_train, X_test, {'unary' : np.log(6), 'variances' : np.log([2, 5])}, no_noise=True),\r\n 6 * np.exp(-1/2 * ((1/2) * 3**2 + (1/5) * 7**2)))\r\n np.testing.assert_approx_equal(\r\n kernel_exponential_unary(X_train, X_test, {'unary': np.log(1), 'length_scale': np.log(7)}, no_noise=True),\r\n kernel_exponential_ard(X_train, X_test, {'unary': np.log(1), 'variances' : np.log([7**2, 7**2])}, no_noise=True)\r\n )\r\n ","sub_path":"src/kernels.py","file_name":"kernels.py","file_ext":"py","file_size_in_byte":7758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"51993436","text":"# Word Parser\n# April 10, 2020\n\n# Importing required modules\nimport PyPDF2\n\n# Creating a pdf file object\npdfFileObj = open(\"thinkpython.pdf\", 'rb') # rb stands for read, if I wanted to write I would put \"wb\"\n\n# Creating a pdf reader object\npdfReader = PyPDF2.PdfFileReader(pdfFileObj)\n\n# Printing number of pages in pdf file\nprint(pdfReader.numPages)\n\n# Creating a page objext\npageObj = pdfReader.getPage(0)\n\n# Extracting text from page\n# MOST IMPORTANT PART\nprint(pageObj.extractText())\n\n# Closing the pdf file 
object\npdfFileObj.close()\n","sub_path":"word.py","file_name":"word.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"38119216","text":"from django.core.mail import EmailMultiAlternatives\nimport re\n\n\nclass CompanyEmail:\n\n    from_email = 'kwasi.innovate@gmail.com'\n    to_email = 'kwasi.adu@gmail.com'\n    subject = 'Hi'\n    html_content = \"\"\n\n    def remove_html_tags(self, content):\n        \"\"\"Remove html tags from a string\"\"\"\n        reg_expr = re.compile('<.*?>')\n        return re.sub(reg_expr, '', content)\n\n    def __init__(self, from_email=from_email,\n                 to_email=to_email, subject=subject, html_content=html_content):\n\n        self.from_email = from_email\n        self.to_email = to_email\n        self.subject = subject\n        self.html_content = html_content\n        self.text_content = self.remove_html_tags(html_content)\n\n    def send(self):\n\n        msg = EmailMultiAlternatives(self.subject, self.text_content, self.from_email,\n                                     [self.to_email])\n        msg.attach_alternative(self.html_content, \"text/html\")\n        msg.send()\n\n\nclass NewTalentEmail(CompanyEmail):\n\n    def __init__(self):\n\n        self.subject = 'Welcome'\n        self.html_content = \"\"\"\n        Thanks for signing up with us\n        \"\"\"\n        self.text_content = self.remove_html_tags(self.html_content)\n\n\nclass NewContactNotification(CompanyEmail):\n\n    def __init__(self, contact):\n\n        self.contact = contact\n        self.subject = \"New Innovate Contact\"\n        self.to_email = \"kwasi.adu@gmail.com\"\n        self.html_content = \"\"\"\n        A new company request has been entered\n        \"\"\"\n\n        for k, v in self.contact.iteritems():\n            html_line = \"  • \" + \"\" + k + \": \" \\\n                + v + \"  • \"\n            self.html_content += html_line\n\n        self.html_content += \"\"\n        self.text_content = self.remove_html_tags(self.html_content)\n\n\nclass NewContactEmail(CompanyEmail):\n\n    def __init__(self, contact):\n        contact_name = contact['name']\n        self.subject = \"Welcome\"\n        self.to_email = contact['email']\n        self.html_content = \"\"\"\n        Hi %(contact_name)s,\n        Thank you for choosing us. We shall get back to you within 24\n            hrs\n
\n\n \"\"\" % locals()\n\n self.text_content = self.remove_html_tags(self.html_content)","sub_path":"mysite/emails.py","file_name":"emails.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"229272370","text":"from flask import Blueprint, jsonify, request, current_app\nfrom recipefinder import db\nfrom recipefinder.models import Recipe, RecipeSchema\nfrom recipefinder.globalutils import token_required\nfrom .utils import init_Category, init_tags, init_ingredient, exist_checker, recipe_jsonify\n\nrecipes = Blueprint('recipes', __name__)\n\nrecipe_schema = RecipeSchema()\nrecipes_schema = RecipeSchema(many=True)\n\n\n# @recipes.get(\"/testish\")\n# def testing():\n# temp = Recipe.query.get(2)\n# a = \"vegan\" in temp.tags.name\n# print(temp.tags)\n# return jsonify({\"message\": a}), 200\n\n\n@recipes.get('/recipes')\ndef get_all_recipes():\n page = request.args.get('page', 1, type=int)\n # print(page)\n recipes = Recipe.query.order_by(\n Recipe.created_at.desc()).paginate(per_page=10, page=page)\n result = []\n for recipe in recipes.items:\n result.append(recipe_jsonify(recipe))\n # result = recipes_schema.dump(recipes.items)\n # return jsonify(result), 200\n return jsonify(result), 200\n\n\n@recipes.post('/recipes')\n@token_required\ndef create_recipes(current_user):\n data = request.json\n title = data['title']\n link = data['link']\n cates = init_Category(data['category'])\n tags = init_tags(data['tags'])\n ings = init_ingredient(data['ingredients'])\n instruction = data['instruction']\n try:\n new_recipe = Recipe(title=title, link=link,\n instruction=instruction, creator=current_user)\n for cat in cates:\n new_recipe.categories.append(cat)\n for tag in tags:\n new_recipe.tags.append(tag)\n for ing in ings:\n new_recipe.ingredients.append(ing)\n db.session.add(new_recipe)\n db.session.commit()\n except Exception as e:\n print(e)\n return jsonify({'message': 'Error creating recipe'}), 400\n db.session.flush()\n return jsonify(recipe_jsonify(new_recipe))\n\n\n@recipes.get('/recipes/')\ndef get_recipe(recipe_id):\n recipe = Recipe.query.get(recipe_id)\n if not recipe:\n return jsonify({'message': 'the recipe does not exist'}), 400\n return jsonify(recipe_jsonify(recipe))\n\n\n@recipes.patch('/recipes/')\n@token_required\ndef edit_recipe(current_user, recipe_id):\n if not current_user:\n return jsonify({'message': 'must login'}), 400\n recipe = Recipe.query.get(recipe_id)\n if not recipe:\n return jsonify({'message': 'the recipe does not exist'}), 404\n if current_user != recipe.creator:\n return jsonify({'message': 'You do not have permission to edit this post'}), 401\n data = request.json\n recipe.title = data['title']\n recipe.link = data['link']\n recipe.instruction = data['instruction']\n db.session.commit()\n return jsonify(recipe_jsonify(recipe))\n\n\n@recipes.patch('/recipes//like')\n@token_required\ndef like_recipe(current_user, recipe_id):\n recipe = Recipe.query.get(recipe_id)\n if not recipe:\n return jsonify({'message': 'the recipe does not exist'}), 400\n recipe.update_like(current_user)\n db.session.commit()\n return jsonify(recipe_jsonify(recipe)), 201\n\n\n@recipes.patch('/recipes//dislike')\n@token_required\ndef dislike_recipe(current_user, recipe_id):\n recipe = Recipe.query.get(recipe_id)\n if not recipe:\n return jsonify({'message': 'the recipe does not exist'}), 400\n recipe.update_dislike(current_user)\n db.session.commit()\n return jsonify(recipe_jsonify(recipe)), 
201\n\n\n@recipes.patch('/recipes/<int:recipe_id>/save')\n@token_required\ndef save_recipe(current_user, recipe_id):\n    recipe = Recipe.query.get(recipe_id)\n    if not recipe:\n        return jsonify({'message': 'the recipe does not exist'}), 400\n    recipe.update_saved(current_user)\n    db.session.commit()\n    return jsonify(recipe_jsonify(recipe)), 201\n\n\n@recipes.delete('/recipes/<int:recipe_id>')\n@token_required\ndef delete_recipe(current_user, recipe_id):\n    recipe = Recipe.query.get(recipe_id)\n    if not recipe:\n        return jsonify({'message': 'the recipe does not exist'}), 400\n    if current_user != recipe.creator:\n        return jsonify({'message': 'You do not have permission to edit this post'}), 401\n    db.session.delete(recipe)\n    db.session.commit()\n    return jsonify(recipe_jsonify(recipe))\n\n\n@recipes.patch(\"/recipes/<int:recipe_id>/add_ingredient\")\n@recipes.patch(\"/recipes/<int:recipe_id>/add_tag\")\n@recipes.patch(\"/recipes/<int:recipe_id>/add_category\")\n@recipes.patch(\"/recipes/<int:recipe_id>/remove_ingredient\")\n@recipes.patch(\"/recipes/<int:recipe_id>/remove_tag\")\n@recipes.patch(\"/recipes/<int:recipe_id>/remove_category\")\ndef routesTodo():\n    return jsonify({\"message\": \"WIP\"}), 200\n","sub_path":"server/recipefinder/recipes/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":4715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"647765974","text":"def check_fermat (a, b, c, n):\n    if n > 2:\n        if a**n + b**n == c**n:\n            print('Holy smokes, Fermat was wrong!')\n        else:\n            print('No, that doesn\\'t work.')\n    else:\n        print('Fermat said for values of n greater than 2.')\n\nprint('Integer values to test for a^n + b^n = c^n')\nua = input('a = ')\nub = input('b = ')\nuc = input('c = ')\nun = input('n = ')\n\ncheck_fermat (int(ua), int(ub), int(uc), int(un))\n","sub_path":"exercise5-1.py","file_name":"exercise5-1.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"288093166","text":"\"\"\" Given a string s, find the longest palindromic substring in s. You may assume that the maximum length of s is 1000.\n\nInput: \"babad\"\nOutput: \"bab\"\nNote: \"aba\" is also a valid answer. \"\"\"\n\n\"\"\" Optimize the palindrome-verification step\nUsing a dynamic-programming approach: first handle the 1-character and 2-character palindrome checks,\nthen verify longer palindromes by table lookup\nP[i,j] indicates whether s[i...j] (closed interval on both ends) is a palindrome\nP[i,j] = P[i+1,j-1] && s[i] = s[j]\nTime complexity: O(n^2)\nSpace complexity: O(n^2) \"\"\"\n\n\nclass Solution1(object):\n    def longestPalindrome(self, s):\n        \"\"\"\n        :type s: str\n        :rtype: str\n        \"\"\"\n        ret = ''\n        length = len(s)\n        p = [[False for x in range(0, length)] for y in range(0, length)]\n        # Initialize the 1-character and 2-character palindrome checks\n        for i in range(0, length):\n            p[i][i] = True\n            if len(ret) < 2:\n                ret = s[i]\n            if (i + 1 < length) and (s[i] == s[i+1]):\n                p[i][i+1] = True\n                ret = s[i:i+2]\n        # Fill the lookup table in order of increasing length\n        for i in range(2, length):\n            # i is the offset, i+1 is the palindrome length\n            for j in range(0, length):\n                if j + i < length and p[j + 1][j + i - 1] and s[j] == s[j + i]:\n                    p[j][j + i] = True\n                    if len(ret) < i + 1:\n                        ret = s[j: j + i + 1]\n        return ret\n\n\n\"\"\" \nExpand around centers; there are 2n-1 of them (covering odd and even lengths)\nGrow outward from each center\nTime complexity: O(n^2)\nSpace complexity: O(1)\n\"\"\"\nclass Solution(object):\n    def longestPalindrome(self, s):\n        \"\"\"\n        :type s: str\n        :rtype: str\n        \"\"\"\n        if len(s) == 0:\n            return \"\"\n        start_i = 0\n        end_i = 0\n        for i in range(0, len(s)):\n            oddlen_i = self.__expand_around_center(s, i, i)\n            evenlen_i = self.__expand_around_center(s, i, i + 1)\n            len_i = max(oddlen_i, evenlen_i)\n            if len_i > end_i - start_i:\n                start_i = i - (len_i - 1) // 2\n                end_i = i + len_i // 2\n        return s[start_i : end_i + 1]\n\n    def __expand_around_center(self, s, left_i, right_i):\n        while left_i >= 0 and right_i < len(s) and s[left_i] == s[right_i]:\n            left_i -= 1\n            right_i
+= 1\n return right_i - left_i - 1","sub_path":"5.longestPalindrome/longestPalindrome.py","file_name":"longestPalindrome.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"638356821","text":"import app\r\ndef M_deleteNo(): #delete using item_no\r\n mydb = app.connection.connect()\r\n cursor = mydb.cursor()\r\n delete = input('\\nEnter Item_no to delete : ')\r\n cursor.execute('delete from menu where item_no={}'.format(delete))\r\n print('successfully Deleted item_no {}')\r\n mydb.commit()\r\n mydb.close()\r\n \r\n#M_deleteNo()\r\n\r\ndef M_deleteName(): #delete using item_name\r\n mydb = app.connection.connect()\r\n cursor = mydb.cursor()\r\n delete = input('\\nEnter Item_name to delete : ')\r\n cursor.execute(\"delete from menu where item_name='{}'\".format(delete))\r\n print('successfully Deleted item_name {}')\r\n mydb.commit()\r\n mydb.close()\r\n \r\n#M_deleteName()\r\n\r\n\r\n\r\n","sub_path":"delete.py","file_name":"delete.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"298132512","text":"import os\r\nimport sqlite3\r\nimport requests\r\nfrom win32.win32crypt import CryptUnprotectData\r\nimport re\r\nimport jmr_read_config\r\n\r\ndef getcookiefromchrome(host='www.baidu.com'):\r\n cookiepath=os.environ['LOCALAPPDATA']+r\"\\Google\\Chrome\\User Data\\Default\\Cookies\"\r\n sql=\"select host_key,name,encrypted_value from cookies where host_key='%s'\" % host\r\n with sqlite3.connect(cookiepath) as conn:\r\n cu=conn.cursor()\r\n cookies={name:CryptUnprotectData(encrypted_value)[1].decode() for host_key,name,encrypted_value in cu.execute(sql).fetchall()}\r\n return cookies\r\n\r\ndef savecookie2file(cookies = {}, cookiesfile = 'cookies.data'):\r\n cookies = cookies\r\n cookiesfile = cookiesfile\r\n with open(cookiesfile, 'w') as cookies_f:\r\n cookies_f.write(str(cookies))\r\n print(\"save to file cookies :\",cookies)\r\n return len(str(cookies))\r\n\r\ndef readcookie_from_file(cookiesfile = 'cookies.data'):\r\n cl = {}\r\n cookiesfile = cookiesfile\r\n\r\n with open(cookiesfile, 'r') as cookies_f:\r\n cl = eval(cookies_f.read())\r\n return cl\r\n\r\ndef main():\r\n if not os.path.exists('config.ini') : jmr_read_config.save_config_file('config.ini')\r\n cfg = jmr_read_config.readconfig_from_file('config.ini')\r\n\r\n url = cfg[\"url\"]\r\n url_chrome = url.split(\"/\")[2].split(\":\")[0]\r\n httphead = eval(cfg[\"http_header\"])\r\n cookies_save = getcookiefromchrome(url_chrome)\r\n savecookie2file(cookies_save,'cookies.data')\r\n\r\n cl = readcookie_from_file('cookies.data')\r\n r = requests.get(url , headers = httphead, cookies = cl , allow_redirects = 1)\r\n print (\"Check login : - \" , (\"shanghai\" in r.text) )\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"jmr_save_cookies.py","file_name":"jmr_save_cookies.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"432298359","text":"#!/usr/bin/env python\n\nimport time\nimport sys\nimport random\nfrom psychopy import visual,event,core,gui\nimport csv\n\n\"\"\"\n\n11. Do something new. 
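A quick usage check for the two longestPalindrome implementations above; integer division is written as // in the repaired record so this runs under Python 3, and the test strings are arbitrary.

for s in ["babad", "cbbd", "a"]:
    # Both the DP table (Solution1) and center expansion (Solution) should agree
    print(s, Solution1().longestPalindrome(s), Solution().longestPalindrome(s))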
\nCompare response times to first and last names, \nmeasure effect of font face, etc.\n\"\"\"\n\nnames = open('names.txt', 'r').readlines()\nfirstNames = [name.split(' ')[0] for name in names]\nlastNames = [name.split(' ')[1].rstrip() for name in names]\nNames = firstNames + lastNames\n\"\"\"\nthe two line above are a more compact way of writing: \nnames = open('names.txt', 'r').readlines()\nfirstNames=[]\nfor name in names:\n firstNames.append(name.split(' ')[0])\n\"\"\" \nprompt = \"Please enter a name\\n >\"\n\nprompt = gui.Dlg(title=\"Please enter a name:\\n\")\nprompt.addField('Name')\nwin = visual.Window([800,600],color=\"black\", units='pix')\n# feedback wrong\nNameNoExistStim = visual.TextStim(win,text=\"Name does not exist\", height=40, color=\"red\",pos=[0,0])\nNameStim = visual.TextStim(win,text=\"\", height=40, color=\"white\",pos=[0,0])\nfixationStim = visual.TextStim(win,text=\"+\", height=40, color=\"white\",pos=[0,0])\n# feedback correct\ncStim = visual.TextStim(win,text=\"0\", height=40, color=\"green\",pos=[0,0])\n# feedback wrong\nwStim = visual.TextStim(win,text=\"X\", height=40, color=\"red\",pos=[0,0])\n\nresp = prompt.show()\nif len(resp) and resp[0] in firstNames:\n NameStim.setText(\"Proceeding...\")\n NameStim.draw()\nelse:\n NameNoExistStim.draw()\n\nwin.flip()\ncore.wait(2)\n\ntrialNum = 1\nwhile True:\n fixationStim.draw()\n win.flip()\n core.wait(.5)\n win.flip()\n # random.choice takes a list as an argument\n nameShown = random.choice(Names)\n NameStim.setText(nameShown)\n NameStim.draw()\n win.flip()\n ACC = 1\n RT = 'NA'\n while True :\n resp = event.waitKeys(maxWait = 1)\n print(resp)\n if not resp :\n wStim.draw()\n win.flip()\n core.wait(.5)\n win.flip()\n break \n # First names\n if nameShown in firstNames : \n firstLast = 'f'\n if ( ('f' in resp) ) : \n cStim.draw()\n win.flip()\n core.wait(.5)\n win.flip()\n ACC = 1\n break\n else:\n wStim.draw()\n win.flip()\n core.wait(.5)\n win.flip()\n ACC = 0\n break \n\n if nameShown in lastNames : \n firstLast = 'l'\n if ( ('f' in resp) ) : \n cStim.draw()\n win.flip()\n core.wait(.5)\n win.flip()\n ACC = 1\n break\n else:\n wStim.draw()\n win.flip()\n core.wait(.5)\n win.flip()\n ACC = 0\n break \n # Write the file \n outfile = \"output_11.txt\" \n openfile = open(outfile, 'a')\n csvout = csv.writer(openfile, delimiter='\\t')\n write_me = [trialNum, firstLast, nameShown, ACC, RT]\n csvout.writerow(write_me)\n openfile.close()\n trialNum += 1\n core.wait(.5)\n win.flip()\n\n\n","sub_path":"exercise_2_11.py","file_name":"exercise_2_11.py","file_ext":"py","file_size_in_byte":3092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"553059306","text":"from django.contrib import admin\nfrom django_admin_listfilter_dropdown.filters import DropdownFilter, RelatedDropdownFilter\n\nfrom staff_models.staff_groups.class_models.staff_deliver import StaffDeliver\nfrom staff_models.staffs.class_models.staff import Staff\n\n\nclass StaffDeliverAdmin(admin.ModelAdmin):\n list_display = ('id', 'staff', 'assigned_date', 'is_active')\n list_display_links = ['staff', ]\n list_per_page = 25\n list_filter = (\n # for ordinary fields\n ('assigned_date', DropdownFilter),\n ('is_active', DropdownFilter),\n # for choice fields\n # ('a_choicefield', ChoiceDropdownFilter),\n # for related fields\n ('staff', RelatedDropdownFilter),\n )\n\n def formfield_for_foreignkey(self, db_field, request, **kwargs):\n # stock\n if db_field.name == \"staff\":\n try:\n # parent_id = 
request.resolver_match.args[0]\n kwargs[\"queryset\"] = Staff.objects.filter(\n is_active=True\n )\n except IndexError:\n pass\n return super().formfield_for_foreignkey(db_field, request, **kwargs)\n\n\nadmin.site.register(StaffDeliver, StaffDeliverAdmin)\n","sub_path":"staff_models/staff_groups/class_admins/staff_deliver_admin.py","file_name":"staff_deliver_admin.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"68781239","text":"#importing in functions that we need for the program to work\r\nimport pygame as pg\r\nimport random\r\nfrom textbox import TextBox\r\n\r\npg.init()\r\n\r\n#setting up colour RGB values\r\nwhite = (255,255,255)\r\nred = (255,0,0)\r\nblue = (0,0,255)\r\ngreen = (0,255,0)\r\nblack = (0,0,0)\r\npink = (255,20,147)\r\nbright_red = (200,0,0)\r\nbright_green = (0,200,0)\r\n\r\nbg = pg.image.load('startscreen.png')\r\n\r\nclock = pg.time.Clock()\r\n\r\nfont = pg.font.Font(None, 25)\r\nframe_count = 0\r\nframe_rate = 60\r\nstart_time = 180\r\n\r\nmenuDisplay = pg.display.set_mode((1200,600))\r\ngameDisplay = pg.display.set_mode((1200, 600))\r\ndisplay_width = 800\r\ndisplay_height = 600\r\n\r\ngameExit = False\r\n\r\nKEY_REPEAT_SETTING = (200,70)#textbox to appear in the same position after action \r\n\r\n\"\"\"setting instruction colour and font\"\"\"\r\ndef instruction(i,Space,List):\r\n intrs = List\r\n font = pg.font.SysFont(\"Times\", 25)\r\n message = intrs[i]\r\n rend = font.render(message, True, pg.Color(\"red\"))\r\n return (rend, rend.get_rect(topleft=(900,35+Space)))\r\n\r\n\"\"\"setting font and colour of the name displayed at the start\"\"\"\r\ndef text_objects(text, font):\r\n textSurface = font.render(text, True, black)\r\n return textSurface, textSurface.get_rect()\r\n\r\n\"\"\"quit game button function\"\"\"\r\ndef quit_game():\r\n pg.quit()\r\n quit()\r\n\"\"\"start game button function\"\"\"\r\ndef start_game():\r\n app = MainProgram()\r\n app.main_loop()\r\n\r\n \r\n#Right hand side title screen colour\r\ndef game_intro():\r\n x=0\r\n y=0\r\n i = 0\r\n intro = True\r\n gameDisplay.fill(black)\r\n while intro:\r\n for event in pg.event.get():\r\n if event.type == pg.QUIT: #quit function added \r\n pg.quit()\r\n quit()\r\n \r\n#Now creating the start-up screen\r\n \r\n gameDisplay.blit(bg,(x,y)) #displaying in starmenu bg being pygame, x and y being the position\r\n largeText = pg.font.Font('freesansbold.ttf',80)#font and font size\r\n TextSurf, TextRect = text_objects(\"Bargain Inspector\", largeText)#Program name\r\n TextRect.center = ((display_width/2),(display_height/2)) #text allignment\r\n gameDisplay.blit(TextSurf, TextRect)\r\n \r\n button(\"Start\",80,450,120,85,white,bright_green,start_game) #Button which starts the program, position,size, colour and linked to the start game function \r\n button(\"Quit Game\",605,450,160,85,white,bright_red,quit_game) # Button which closes the program, position,size, colour and linked to the quit game function\r\n\r\n intrs = [\"INSTRUCTIONS:\",\"Enter colour for car type\", \"Enter co-ordinates\", \"Enter value of car models\", \"Enter time for robot\",]#Instruction displayed on the screen\r\n space = int(150) #position of the instruction on the screen \r\n \r\n while i != 5: #5 total strings\r\n prompt = instruction(i,space,intrs)#i=number of instructions, space = position, intrs = intructions\r\n gameDisplay.blit(*prompt)\r\n space = space + 40 #how close to each other the instructions\r\n pg.display.update() 
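The formfield_for_foreignkey override above is the standard Django admin hook for narrowing a foreign-key dropdown; here is a minimal generic sketch (the Order/product model and field names are hypothetical, not from the record).

from django.contrib import admin

class OrderAdmin(admin.ModelAdmin):
    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        # Restrict the 'product' dropdown to rows that are still in stock;
        # db_field.related_model is the model the foreign key points at
        if db_field.name == "product":
            kwargs["queryset"] = db_field.related_model.objects.filter(in_stock=True)
        return super().formfield_for_foreignkey(db_field, request, **kwargs)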
#for clock speed functuon\r\n i = i+1\r\n \r\n \r\n pg.display.update()\r\n#limit the clock speed to 15 FPS (to prevent overflow)\r\n clock.tick(15)\r\n\r\n#buttons function defined, event driven action (for the Start game and Quit button) \r\ndef button(msg,x,y,w,h,ic,ac,action=None):\r\n mouse = pg.mouse.get_pos()\r\n click = pg.mouse.get_pressed()\r\n print(click)\r\n if x+w > mouse[0] > x and y+h > mouse[1] > y: #when mouse button clicked outcome (1,0,0)\r\n pg.draw.rect(gameDisplay, ac,(x,y,w,h))\r\n if click[0] == 1 and action != None: #if mouse position (0,0,0 = no action, otherwise event driven action)\r\n action()\r\n else:\r\n pg.draw.rect(gameDisplay, ic,(x,y,w,h))\r\n smallText = pg.font.SysFont(\"Times\",20)\r\n textSurf, textRect = text_objects(msg, smallText)\r\n textRect.center = ( (x+(w/2)), (y+(h/2)) )\r\n gameDisplay.blit(textSurf, textRect)\r\n\r\n#Initialising the main program with class attribute\r\nclass MainProgram(object):\r\n def __init__(self):\r\n \"\"\"The initialisation function of key components of the main program\"\"\"\r\n pg.init()\r\n pg.display.set_caption(\"D5's Bargain Inspector\")\r\n bg = pg.image.load('mainScreen.png')\r\n gameDisplay.blit(bg,(0,0))\r\n self.red = []\r\n self.blue = []\r\n self.green = []\r\n self.pink = []\r\n self.colourA =[]\r\n self.colour = \"\"\r\n self.num_items = 0\r\n self.finishedList =[]\r\n self.time = 0\r\n self.frame_count = 0\r\n self.frame_rate = 60\r\n self.start_time = 180\r\n self.screen = menuDisplay\r\n self.clock = pg.time.Clock()\r\n self.robot_loc = []\r\n self.fps = 60.0\r\n self.done = False\r\n self.input = TextBox((900,200,200,40),command=self.get_input, #setting the size and position of the text box\r\n clear_on_enter=True,inactive_on_enter=False)\r\n self.user_input = \"\"\r\n self.color = white\r\n self.prompt = self.make_prompt('Enter Red:BMW, Blue:Vauxhall, Green:Land Rover, Pink:Lexus')\r\n pg.key.set_repeat(*KEY_REPEAT_SETTING) #textbox to appear in the same position after action \r\n\r\n def make_prompt(self,Message):\r\n \"\"\" Function to create the labels, called everytime a new input is entered \"\"\"\r\n pg.draw.rect(menuDisplay , white,(820,165,400,30)) #1 is left right position, 2 is up down, 3 is width, 4 is height \r\n font = pg.font.SysFont(\"Times\", 14)\r\n message = Message\r\n rend = font.render(message, True, pg.Color(\"black\"))\r\n return (rend, rend.get_rect(topleft=(820,165)))#position of the text in the screen\r\n\r\n def event_loop(self):\r\n \"\"\" A continuous FOR loop which allows an exit for our main program\"\"\"\r\n for event in pg.event.get():\r\n if event.type == pg.QUIT:\r\n self.done = True\r\n self.input.get_event(event)\r\n\r\n def random_types(self):\r\n \"\"\"Randomly generates colours into the screen and randomly gives them a price and a name eg. 
red-bmw,price \"\"\"\r\n names = [\"BMW\", \"Vauxhall\", \"Land Rover\", \"Lexus\"]\r\n for i in range(50):\r\n item = random.randint(1,3)\r\n radx = random.randint(0,790)\r\n rady = random.randint(0,590)\r\n radp = random.randint(1,20)\r\n radnum = random.randint(1,4) - 1\r\n radn = names[radnum]\r\n coords = [radx,rady,radp,radn]\r\n\r\n if item == 1:\r\n pg.draw.rect(menuDisplay , red,(radx,rady,10,10))\r\n self.red.append(coords)\r\n elif item == 2:\r\n pg.draw.rect(menuDisplay , blue,(radx,rady,10,10))\r\n self.blue.append(coords)\r\n elif item == 3:\r\n pg.draw.rect(menuDisplay, green,(radx,rady,10,10))\r\n self.green.append(coords)\r\n elif item == 4:\r\n pg.draw.rect(menuDisplay, pink,(radx,rady,10,10))\r\n self.pink.append(coords)\r\n i = i +1\r\n\r\n def get_input(self,id,input):\r\n \"\"\" allows the user to search for cars by enterin a specific colour \"\"\"\r\n try:\r\n input = input.lower()\r\n self.user_input = input\r\n self.colour = input\r\n if self.user_input == \"red\" or self.user_input == \"blue\" or self.user_input == \"green\" or self.user_input == \"pink\":\r\n self.prompt = self.make_prompt('Where do you want to start : e.g. NW')\r\n self.input = TextBox((900,200,200,40),command=self.robot_start, # textbox position \r\n clear_on_enter=True,inactive_on_enter=False)\r\n if input == \"red\":\r\n for coord in self.red:\r\n x = coord[0]\r\n y = coord[1]\r\n pg.draw.rect(menuDisplay, red,(x,y,15,15))\r\n self.colourA = self.red\r\n\r\n elif input == \"blue\":\r\n for coord in self.blue:\r\n x = coord[0]\r\n y = coord[1]\r\n pg.draw.rect(menuDisplay, blue,(x,y,15,15))\r\n self.colourA = self.blue\r\n\r\n elif input == \"green\":\r\n for coord in self.green:\r\n x = coord[0]\r\n y = coord[1]\r\n pg.draw.rect(menuDisplay, green,(x,y,15,15))\r\n self.colourA = self.green\r\n\r\n elif input == \"pink\":\r\n for coord in self.pink:\r\n x = coord[0]\r\n y = coord[1]\r\n pg.draw.rect(menuDisplay, pink,(x,y,15,15))\r\n self.colourA = self.pink\r\n \r\n else:\r\n self.prompt = self.make_prompt('Please enter the colour type given')\r\n self.screen = menuDisplay\r\n\r\n except ValueError:\r\n print(\"ERROR\")\r\n\r\n def robot_start(self,id,input):\r\n \"\"\" Allows the user to choose the starting position of the robot\"\"\"\r\n input = input.upper()\r\n self.robot_loc = input\r\n if input == \"N\":\r\n pg.draw.rect(menuDisplay, red,(400,0,20,30))\r\n self.robot_loc = [400,0]\r\n elif input == \"E\":\r\n pg.draw.rect(menuDisplay, blue,(750,300,20,30))\r\n self.robot_loc = [750,300]\r\n elif input == \"S\":\r\n pg.draw.rect(menuDisplay, pink,(400,550,20,30))\r\n self.robot_loc = [400,550]\r\n elif input == \"W\":\r\n pg.draw.rect(menuDisplay, green,(10,300,20,30))\r\n self.robot_loc = [10,300]\r\n elif input == \"NW\":\r\n pg.draw.rect(menuDisplay, bright_green,(10,10,20,30))\r\n self.robot_loc = [10,10]\r\n elif input == \"NE\":\r\n pg.draw.rect(menuDisplay, bright_red,(750,10,20,30))\r\n self.robot_loc = [750,10]\r\n elif input == \"SW\":\r\n pg.draw.rect(menuDisplay, red,(10,550,20,30))\r\n self.robot_loc = [10,550]\r\n elif input == \"SE\":\r\n pg.draw.rect(menuDisplay, pink,(750,550,20,30))\r\n self.robot_loc = [750,550]\r\n else:\r\n self.prompt = self.make_prompt('Please enter a valid co-cordinate for the robot to search')\r\n if input == \"N\" or input == \"E\" or input == \"S\" or input == \"W\" or input == \"NW\" or input == \"NE\" or input == \"SW\" or input == \"SE\":\r\n self.prompt = self.make_prompt('Please enter the number of car types you will like to find?')\r\n 
self.input = TextBox((900,200,200,40),command=self.number_of_items, #textbox position\r\n clear_on_enter=True,inactive_on_enter=False)\r\n\r\n def number_of_items(self,id,input):\r\n \"\"\" This will allow the user to enter the number of chosen car models they want to find\"\"\"\r\n\r\n if input.isdigit() and (int(input) <= len(self.colourA)):\r\n self.num_items = int(input)\r\n self.prompt = self.make_prompt('Enter the minutes you want the robot to search for?')\r\n self.input = TextBox((900,200,200,40),command=self.input_time, #textbox pisition\r\n clear_on_enter=True,inactive_on_enter=False)\r\n\r\n else:\r\n self.prompt = self.make_prompt('Please enter how many chosen car models to find?')\r\n\r\n def input_time(self,id,input):\r\n \"\"\" Allows the user to enter the time for the robot to search for car types\"\"\"\r\n\r\n if input.isdigit() and int(input) <= 15:\r\n self.time = input\r\n self.start_time = int(self.time) * 60\r\n\r\n else:\r\n self.prompt = self.make_prompt('Please enter a valid time, e.g 1 for 1 minute')\r\n\r\n def collide(self,c1, p1, p2, p3,xORy):\r\n \"\"\" Tests to see if the next pixals are not white\"\"\"\r\n locations = [p1,p2,p3]\r\n self.Collide = False\r\n i = 0\r\n if xORy == \"X\":\r\n while i != 3:\r\n colour = menuDisplay.get_at((c1,locations[i])) # gets the colour of the pixal at the coordinates\r\n if (c1 >= self.nextX and c1 <= (self.nextX + 15)) and (p1 >= self.nextY and p1 <= (self.nextY + 15)):\r\n i=i+1\r\n continue\r\n elif (colour[0] != 255 or colour[1] != 255 or colour[2] != 255):\r\n self.Collide = True\r\n break\r\n else:\r\n i=i+1\r\n continue\r\n elif xORy == \"Y\":\r\n while i != 3:\r\n colour = menuDisplay.get_at((locations[i],c1))\r\n if (c1 >= self.nextY and c1 <= (self.nextY + 15)) and (p1 >= self.nextX and p1 <= (self.nextX + 1)):\r\n i=i+1\r\n continue\r\n elif (colour[0] != 255 or colour[1] != 255 or colour[2] != 255):\r\n self.Collide = True\r\n break\r\n else:\r\n i=i+1\r\n continue\r\n\r\n def bubbleSort(self,colourL):\r\n \"\"\" Used to sort the list in order of price, cheapest first\"\"\"\r\n for passnum in range(len(colourL)-1,0,-1):\r\n for i in range(passnum):\r\n if colourL[i][2]>colourL[i+1][2]:\r\n temp = colourL[i]\r\n colourL[i] = colourL[i+1]\r\n colourL[i+1] = temp\r\n\r\n def binarySearch(self, alist, item):\r\n \"\"\"Used to search a list for the search item and returns all infomation about that item\"\"\"\r\n first = 0\r\n last = len(alist)-1\r\n found = False\r\n\r\n while first<=last and not found:\r\n midpoint = (first + last)//2\r\n if alist[midpoint][0] == item:\r\n return(alist[midpoint])\r\n else:\r\n if item < alist[midpoint][0]:\r\n last = midpoint-1\r\n else:\r\n first = midpoint+1\r\n\r\n return found\r\n\r\n def quick_sort(self,items):\r\n \"\"\" Used to sort a list in order by x coords for binary search\"\"\"\r\n if len(items) > 1:\r\n pivot_index = len(items) // 2\r\n smaller_items = []\r\n larger_items = []\r\n\r\n for i, val in enumerate(items):\r\n if i != pivot_index:\r\n if val < items[pivot_index]:\r\n smaller_items.append(val)\r\n else:\r\n larger_items.append(val)\r\n\r\n\r\n self.quick_sort(smaller_items)\r\n self.quick_sort(larger_items)\r\n items[:] = smaller_items + [items[pivot_index]] + larger_items\r\n\r\n def robot_move(self):\r\n \"\"\"Makes the robot move visually and makes a countdown timer that countdowns from the users input\"\"\"\r\n i = 0\r\n if self.colour == \"red\":\r\n self.bubbleSort(self.red)\r\n locations = self.red\r\n elif self.colour == \"blue\":\r\n 
self.bubbleSort(self.blue)\r\n locations = self.blue\r\n elif self.colour == \"green\":\r\n self.bubbleSort(self.green)\r\n locations = self.green\r\n elif self.colour == \"pink\":\r\n self.bubbleSort(self.pink)\r\n locations = self.pink\r\n#pg.draw.rect(menuDisplay, white,(self.robot_loc[0],self.robot_loc[1],20,30))\r\n print(locations)\r\n while i != self.num_items : #Makes the robot move visually\r\n self.event_loop()\r\n nextX = locations[i][0]\r\n nextY = locations[i][1]\r\n\r\n if self.robot_loc[0] == nextX and self.robot_loc[1] == nextY:\r\n pg.draw.rect(menuDisplay, black,(nextX,nextY ,15,15))\r\n self.finishedList.append(locations[i][0])\r\n i = i + 1\r\n\r\n elif self.robot_loc[0] < nextX:\r\n pg.draw.rect(menuDisplay, white,(self.robot_loc[0],self.robot_loc[1],20,30))\r\n self.robot_loc[0] = self.robot_loc[0] + 1\r\n pg.draw.rect(menuDisplay, pink,(self.robot_loc[0],self.robot_loc[1],20,30))\r\n self.input.draw(self.screen)\r\n\r\n elif self.robot_loc[1] < nextY:\r\n pg.draw.rect(menuDisplay,white,(self.robot_loc[0],self.robot_loc[1],20,30))\r\n self.robot_loc[1] = self.robot_loc[1] + 1\r\n pg.draw.rect(menuDisplay, pink,(self.robot_loc[0],self.robot_loc[1],20,30))\r\n self.input.draw(self.screen)\r\n\r\n elif self.robot_loc[0] > nextX:\r\n pg.draw.rect(menuDisplay, white,(self.robot_loc[0],self.robot_loc[1],20,30))\r\n self.robot_loc[0] = self.robot_loc[0] - 1\r\n pg.draw.rect(menuDisplay, pink,(self.robot_loc[0],self.robot_loc[1],20,30))\r\n self.input.draw(self.screen)\r\n\r\n elif self.robot_loc[1] > nextY:\r\n pg.draw.rect(menuDisplay, white,(self.robot_loc[0],self.robot_loc[1],20,30))\r\n self.robot_loc[1] = self.robot_loc[1] - 1\r\n pg.draw.rect(menuDisplay, pink,(self.robot_loc[0],self.robot_loc[1],20,30))\r\n self.input.draw(self.screen)\r\n\r\n self.event_loop()\r\n # Starts the timer countdown\r\n pg.draw.rect(menuDisplay, green,(810,540,400, 60))\r\n total_seconds = self.frame_count // self.frame_rate\r\n total_seconds = self.start_time - (self.frame_count // self.frame_rate)\r\n if total_seconds < 0:\r\n total_seconds = 0\r\n minutes = total_seconds // 60\r\n seconds = total_seconds % 60\r\n output_string = \"Time left: {0:02}:{1:02}\".format(minutes, seconds)\r\n text = font.render(output_string, True, black)\r\n menuDisplay.blit(text, [810, 540])\r\n if output_string == \"Time left: 00:00\":\r\n self.done = True\r\n self.frame_count += 1\r\n clock.tick(frame_rate)\r\n pg.display.flip()\r\n\r\n self.input.draw(self.screen)\r\n self.screen.blit(*self.prompt)\r\n\r\n pg.display.update()\r\n\r\n if self.colour == \"red\":\r\n self.quick_sort(self.red)\r\n elif self.colour == \"blue\":\r\n self.quick_sort(self.blue)\r\n elif self.colour == \"green\":\r\n self.quick_sort(self.green)\r\n elif self.colour == \"pink\":\r\n self.quick_sort(self.pink)\r\n\r\n self.clock.tick(self.fps)\r\n if self.time != 0:\r\n self.done = True\r\n\r\n def output_lists(self,i,Space):\r\n \"\"\"Displays the list of cheapest items picked up\"\"\"\r\n if self.colour == \"red\":\r\n output = self.binarySearch(self.red, self.finishedList[i])\r\n elif self.colour == \"blue\":\r\n output = self.binarySearch(self.blue, self.finishedList[i])\r\n elif self.colour == \"green\":\r\n output = self.binarySearch(self.green, self.finishedList[i])\r\n elif self.colour == \"pink\":\r\n output = self.binarySearch(self.pink, self.finishedList[i])\r\n\r\n font = pg.font.SysFont(\"Times\", 20)\r\n message = str(output[3]) + \" | \" + str(output[2])\r\n rend = font.render(message, True, pg.Color(\"black\"))\r\n 
return (rend, rend.get_rect(topleft=(820,35+Space)))\r\n\r\n def main_loop(self):\r\n \"\"\" Makes the program loops and call certain function only if an event has been met\"\"\"\r\n i = 0\r\n\r\n\r\n\r\n \"\"\"adds sound to the code\"\"\"\r\n pg.mixer.music.load('programsound.wav')\r\n pg.mixer.music.play(-1)\r\n \r\n space = 0\r\n self.random_types()\r\n while not self.done:\r\n self.event_loop()\r\n self.input.update()\r\n self.input.draw(self.screen)\r\n self.screen.blit(*self.prompt)\r\n\r\n pg.display.update()\r\n self.clock.tick(self.fps)\r\n if self.time != 0:\r\n self.done = True\r\n self.done = False\r\n if self.time != 0:\r\n self.robot_move()\r\n pg.draw.rect(menuDisplay , green,(810,0,450,540))\r\n pg.display.update()\r\n while i != self.num_items:\r\n self.prompt = self.output_lists(i,space)\r\n self.screen.blit(*self.prompt)\r\n space = space + 20\r\n pg.display.update()\r\n i = i+1\r\n self.done = False\r\n #self.main_program()\r\n while not self.done:\r\n self.event_loop()\r\n\r\n#Sets up the start-up screen\r\nmenuDisplay.fill(white)\r\npg.draw.rect(menuDisplay , black,(800,0,10,600))\r\n#Calls mainprogram function to start the game\r\ngame_intro()\r\n\r\n\r\npg.display.update()\r\npg.quit()\r\nquit()\r\n","sub_path":"NewStartUpScreen/D5Asis.py","file_name":"D5Asis.py","file_ext":"py","file_size_in_byte":20679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"32520012","text":"import os\n\nfrom django.conf.urls import patterns, include, url\nfrom django.views.generic import TemplateView\nfrom django.conf import settings\nfrom QandA.views import q_and_a\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'Iamlowyer.views.home', name='home'),\n # url(r'^Iamlowyer/', include('Iamlowyer.foo.urls')),\n url(r'^$',\n TemplateView.as_view(template_name='home1.html')),\n url(r'^aboutme/$',\n TemplateView.as_view(template_name='about_me.html')),\n url(r'^Q&A/$',\n q_and_a),\n\n (r'^admin/', include(admin.site.urls)),\n)\n\n\nif settings.DEBUG:\n urlpatterns += patterns('',\n (r'^statics/(?P.*)$', 'django.views.static.serve',\n {'document_root': os.path.join(os.path.dirname(__file__),\\\n '../statics').replace('\\\\', '/')}),\n )\n","sub_path":"Iamlowyer/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"320885213","text":"#!/usr/bin/env Python\n# coding=utf-8\nfrom input_data_fine import *\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\nlogging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',\n level=logging.INFO,\n stream=sys.stdout)\n\nslim = tf.contrib.slim\n\ndef FCN_model(images, labels, slice_thickness):\n\n\n batch_size = tf.shape(images)[0]\n image_width = tf.shape(images)[2]\n image_height = tf.shape(images)[3]\n image_channel = tf.shape(images)[4]\n label_width = tf.shape(labels)[2]\n label_height = tf.shape(labels)[3]\n label_channel = tf.shape(labels)[4]\n\n image_slice = tf.reshape(images[:, 0, :, :, :], [batch_size, image_width, image_height, image_channel])\n label_slice = tf.reshape(labels[:, 0, :, :, :], [batch_size, label_width, label_height, label_channel])\n\n upsampled_logits_batch_1, fcn_8s_variables_mapping = FCN_8s(image_batch_tensor=image_slice,\n number_of_classes=number_of_classes,\n 
new_number_of_classes=3,\n is_training=True,\n is_reuse=False)\n\n upsampled_logits_batch_1 = tf.cast(upsampled_logits_batch_1, tf.float32)\n pred = tf.multiply(tf.nn.sigmoid(upsampled_logits_batch_1), tf.constant(255.0, dtype=tf.float32))\n pred0 = tf.where(tf.greater(pred, tf.multiply(tf.ones_like(pred, tf.float32), tf.constant(128.0, dtype=tf.float32))), \\\n tf.ones_like(pred, tf.float32), tf.zeros_like(pred, tf.float32))\n\n up_loss, down_loss, up_DSC, down_DSC = DSC_loss(upsampled_logits_batch_1, pred0, label_slice)\n\n total_up_loss = up_loss\n total_down_loss = down_loss\n total_up_DSC = up_DSC\n total_down_DSC = down_DSC\n\n \n total_DSC = tf.div(2*total_up_DSC, total_down_DSC)\n total_loss = tf.subtract(tf.constant(1.0, dtype=tf.float32),tf.div(total_up_loss, total_down_loss))\n\n return total_loss, total_DSC, fcn_8s_variables_mapping, pred0\n\ndef DSC_computation(label, pred):\n pred_sum = pred.sum()\n label_sum = label.sum()\n inter_sum = np.multiply(pred, label)\n inter_sum = inter_sum.sum()\n return 2 * float(inter_sum) / (pred_sum + label_sum), inter_sum, pred_sum, label_sum\n\n\nif __name__ == '__main__':\n\n if not os.path.exists(checkpoint_path):\n os.makedirs(checkpoint_path)\n\n #npy_file_list = [\"/media/jionie/Disk1/images/0031.npy\", \"/media/jionie/Disk1/labels/0031.npy\"]\n \n #npy_file_list = [\"/media/jionie/Disk1/results/coarse_gan/fcn_vgg/test/coarse_X_1/11.npz\", \"/media/jionie/Disk1/labels/0031.npy\"]\n\n #npy_file_list = [\"/media/jionie/Disk1/results/oracle_gan/fcn_vgg/test/oracle_X_1/11.npz\", \"/media/jionie/Disk1/labels/0031.npy\"]\n\n npy_file_list = [\"/media/jionie/Disk1/results/coarse2fine/fcn_vgg/test/fusion:X_Y_Z_1/R1_11.npz\", \"/media/jionie/Disk1/labels/0031.npy\"]\n \n print(npy_file_list)\n volume_data = np.load(npy_file_list[0])\n image = volume_data['volume']\n #image[image < low_range] = low_range\n #image[image > high_range] = high_range\n label = np.load(npy_file_list[1])\n fig1 = plt.figure()\n \n image_x = image[282, :, :]\n label_x = label[282, :, :]\n image_y = image[:, 232, :]\n label_y = label[:, 232, :]\n image_z = image[:, :, 137]\n label_z = label[:, :, 137]\n\n #ax1 = fig1.add_subplot(1,2,1)\n #ax1.imshow(image_x, cmap = 'gray')\n\n #ax2 = fig1.add_subplot(1,2,2)\n #ax2.imshow(label_x, cmap = 'gray')\n\n #ax3 = fig1.add_subplot(1,2,1)\n #ax3.imshow(image_y, cmap = 'gray')\n\n #ax4 = fig1.add_subplot(1,2,2)\n #ax4.imshow(label_y, cmap = 'gray')\n\n \n\n ax5 = fig1.add_subplot(1,2,1)\n ax5.imshow(image_z, cmap = 'gray')\n\n ax6 = fig1.add_subplot(1,2,2)\n ax6.imshow(label_z, cmap = 'gray')\n \n plt.show()\n\n DSC = DSC_computation(image_z, label_z)\n print(DSC)\n\n\n \n \n \n\n\n\n\n\n\n\n\n\n\n","sub_path":"OrganSegC2F_Fcn/show_image_result.py","file_name":"show_image_result.py","file_ext":"py","file_size_in_byte":3959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"400504426","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport urllib.request\nimport PIL.Image\n\nurl = r'http://www.pythonchallenge.com/pc/def/oxygen.png'\ncut = (0, 43, 608, 44)\nurllib.request.urlretrieve(url, \"oxygen.png\")\nimg = PIL.Image.open(\"oxygen.png\")\nimg = img.crop(cut)\nimg = img.convert(\"L\")\nline = []\ndata = list(img.getdata())\nfor i in range(0, 607, 7):\n line.append(chr(data[i]))\nprint(\"\".join(line))\n\nl = [105, 110, 116, 101, 103, 114, 105, 116, 121]\nline = []\nfor i in l:\n 
line.append(chr(i))\nprint(\"\".join(line))\n","sub_path":"python/ch7.3.py","file_name":"ch7.3.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"95654801","text":"from .models import Clients\nfrom .forms import ClientsForm\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, redirect, get_object_or_404\n\n@login_required(login_url='/login')\ndef read_clients(request):\n clients = Clients.objects.all()\n return render(request, 'clients/clients.html', {\"clients\": clients})\n\n@login_required(login_url='/login')\ndef new_client(request):\n if request.method == 'POST':\n form = ClientsForm(request.POST or None)\n if form.is_valid():\n form.save()\n return redirect('clients')\n form = ClientsForm()\n return render(request, 'clients/form-client.html', {\"form\": form})\n\n@login_required(login_url='/login')\ndef edit_client(request, id):\n client = get_object_or_404(Clients, pk=id)\n form = ClientsForm(request.POST or None, instance=client)\n if form.is_valid():\n form.save()\n return redirect('clients')\n return render(request, 'clients/form-client.html', {\"form\": form})\n\n@login_required(login_url='/login')\ndef delete_client(request, id):\n client = get_object_or_404(Clients, pk=id)\n form = ClientsForm(request.POST or None, instance=client)\n if request.method == 'POST':\n client.delete()\n return redirect('clients')\n return render(request, 'clients/confirmation.html', {\"client\": client})\n","sub_path":"clients/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"46175225","text":"import argparse\nimport json\nimport os\nimport pickle\n\nfrom collections import namedtuple\n\nimport numpy as np\n\n\ndef load_json(path):\n with open(path) as f:\n json_contents = json.load(f)\n return json_contents\n\n\n\ndef extract_exp_id(config, filter_by_dict=None):\n matching_ids = []\n for k in config:\n if config[k]['completed']:\n if filter_by_dict != None:\n match = True\n # filter out runs not matching the filter\n for key in filter_by_dict:\n nested_keys = str.split(key, '.')\n value = config[k]\n for nk in nested_keys:\n value = value[nk]\n if value != filter_by_dict[key]:\n match = False\n # config needs to match all entries in filter dict \n if match:\n matching_ids.append(k)\n else:\n matching_ids.append(k)\n\n return matching_ids\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description='Extract experiment results',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('-d', '--repo_dir', type=str,\n help='path to repo folder',\n default='/Users/heinzec/projects/core-da/repo_dir_7jan/', \n required=False)\n parser.add_argument('-r', '--results_file', type=str,\n help='path to results file',\n default='experiments_cifar10_spatial_all.json', \n required=False)\n parser.add_argument('--epsilon', type=int,\n help='k for worst-of-k',\n default=10, required=False) \n parser.add_argument('--core_lambda', type=float,\n help='lambda for core',\n default=.25, required=False) \n parser.add_argument('--use_core', type=int,\n help='flag whether to extract core results',\n default=1, required=False)\n\n args = parser.parse_args()\n results_dict = load_json(os.path.join(args.repo_dir, args.results_file))\n chosen_lambda = args.core_lambda\n filter_dict = {}\n 
filter_dict['hyperparameters.attack.epsilon'] = args.epsilon\n filter_dict['lambda_core'] = chosen_lambda\n filter_dict['hyperparameters.training.use_core'] = args.use_core\n ids = extract_exp_id(results_dict, filter_dict)\n print(ids)\n","sub_path":"zuowenSTN/code/plotting/get_exp_id_linf.py","file_name":"get_exp_id_linf.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"240726920","text":"import pickle, glob,sys\nfrom collections import defaultdict\nfrom sklearn.neighbors import KDTree\nimport numpy as np\nfrom sklearn.metrics import accuracy_score\nfrom clustering import getFeaturesFromTrainOrTest\n\ndef getLabel(path):\n with open(path) as f:\n data = list(map(lambda x: x.split('/')[1], f.readlines()))\n return data\n\ndef solution(clf, iddb):\n data = getLabel('db/db{}/train.txt'.format(iddb))\n lb_dict = defaultdict(list)\n for i in range(len(data)):\n lb_dict[clf.labels_[i]].append(data[i])\n for i in lb_dict:\n lb_dict[i] = max(set(lb_dict[i]),key=lb_dict[i].count)\n trainY = getLabel('db/db{}/test.txt'.format(iddb))\n testX = getFeaturesFromTrainOrTest('db/db{}/test.txt'.format(iddb))\n idY = clf.predict(testX)\n testY = ['']*len(idY)\n\n for i in range(len(idY)):\n testY[i] = lb_dict[idY[i]]\n\n print(accuracy_score(trainY,testY))\n\n# def mysolution(clf, typefeat, iddb):\n# lb_dict = {}\n#\n# cato, list_feat = getPathFiles(typefeat)\n# center_dataset = []\n#\n# for db in list_feat:\n# tmp = []\n# for p in db:\n# tmp.append(pickle.load(open(p,'rb'))[0])\n# center_dataset.append(sum(tmp)/len(tmp))\n#\n# tree = KDTree(center_dataset)\n# dist, l_ind= tree.query(clf.cluster_centers_,k = 1)\n#\n# for i in range(len(l_ind)):\n# lb_dict[i] = cato[l_ind[i][0]]\n# trainY = getLabel('db/db{}/test.txt'.format(iddb))\n# testX = getFeaturesFromTrainOrTest('db/db{}/test.txt'.format(iddb))\n# idY = clf.predict(testX)\n# testY = ['']*len(idY)\n#\n# for i in range(len(idY)):\n# testY[i] = lb_dict[idY[i]]\n# # print(trainY[0])\n# print(accuracy_score(trainY,testY))\n\n\ndef evakmeans(iddb):\n path_model = 'exp/db/db{}/kmeans.pkl'.format(iddb)\n clf = pickle.load(open(path_model,'rb'))\n solution(clf,iddb)\nif __name__ == '__main__':\n evakmeans(int(sys.argv[1]))\n","sub_path":"CS332.J11.KHTN/dataset/caltech256/accuracy.py","file_name":"accuracy.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"435127996","text":"from copy import deepcopy\nfrom unittest import TestCase\n\nfrom BribeNet.bribery.temporal.nonBriber import NonBriber\nfrom BribeNet.bribery.temporal.randomBriber import RandomBriber\nfrom BribeNet.graph.temporal.noCustomerActionGraph import NoCustomerActionGraph\n\n\nclass TestMultiBriberRatingGraph(TestCase):\n\n def setUp(self) -> None:\n self.rg = NoCustomerActionGraph((RandomBriber(10), NonBriber(10)))\n\n def tearDown(self) -> None:\n del self.rg\n\n def test_neighbours(self):\n for i in range(len(self.rg.get_bribers())):\n for node in self.rg.get_customers():\n self.assertIsInstance(self.rg._neighbours(node, i), list)\n\n def test_p_rating(self):\n for b in range(len(self.rg.get_bribers())):\n for i in self.rg.get_customers():\n self.assertTrue(self.rg._p_rating(i, b) >= 0)\n\n def test_median_p_rating(self):\n for b in range(len(self.rg.get_bribers())):\n for i in self.rg.get_customers():\n self.assertTrue(self.rg._median_p_rating(i, b) >= 0)\n\n def test_sample_p_rating(self):\n for b 
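The nested-key filter in `extract_exp_id` above walks dotted keys such as 'hyperparameters.attack.epsilon' one nesting level at a time. A minimal sketch of that traversal on a made-up config dict (key names illustrative only):

config = {'completed': True, 'hyperparameters': {'attack': {'epsilon': 10}}}

def lookup(cfg, dotted_key):
    value = cfg
    for nk in dotted_key.split('.'):  # descend one level per dot-separated part
        value = value[nk]
    return value

print(lookup(config, 'hyperparameters.attack.epsilon'))  # -> 10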
in range(len(self.rg.get_bribers())):\n for i in self.rg.get_customers():\n self.assertTrue(self.rg._sample_p_rating(i, b) >= 0)\n\n def test_o_rating(self):\n for b in range(len(self.rg.get_bribers())):\n self.assertTrue(self.rg._o_rating(b) >= 0)\n\n def test_p_gamma_rating(self):\n for b in range(len(self.rg.get_bribers())):\n for i in self.rg.get_customers():\n self.assertTrue(self.rg._p_gamma_rating(i) >= 0)\n self.assertAlmostEqual(self.rg._p_gamma_rating(i, gamma=0), self.rg._p_rating(i))\n\n def test_weighted_p_rating(self):\n for b in range(len(self.rg.get_bribers())):\n for i in self.rg.get_customers():\n self.assertTrue(self.rg._p_gamma_rating(i) >= 0)\n\n def test_weighted_median_p_rating(self):\n for b in range(len(self.rg.get_bribers())):\n for i in self.rg.get_customers():\n self.assertTrue(self.rg._p_gamma_rating(i) >= 0)\n\n def test_is_influential(self):\n for b in range(len(self.rg.get_bribers())):\n for i in self.rg.get_customers():\n self.assertGreaterEqual(self.rg.is_influential(i, 0.2, b, charge_briber=False), 0)\n\n def test_bribe(self):\n for i in range(len(self.rg.get_bribers())):\n initial_value = self.rg.eval_graph(i)\n for j in self.rg.get_customers():\n g_copy = deepcopy(self.rg)\n g_copy.bribe(j, 0.1, i)\n bribed_value = g_copy.eval_graph(i)\n self.assertTrue(initial_value != bribed_value)\n\n def test_eval_graph(self):\n for b in range(len(self.rg.get_bribers())):\n self.assertGreaterEqual(self.rg.eval_graph(b), 0)\n\n def test_trust_update(self):\n # Set all votes to 0.\n g_copy = deepcopy(self.rg)\n for u in g_copy.get_customers():\n g_copy._votes[u][0] = 0\n for c in g_copy.get_customers():\n g_copy_2 = deepcopy(g_copy)\n # Then bribe one individual.\n g_copy_2.bribe(0, 1, 0)\n # Update the trust.\n g_copy_2._update_trust()\n # Make sure that the trust goes down for each connected node.\n for n in g_copy.get_customers():\n if self.rg._g.hasEdge(c, n):\n initial_trust = g_copy.get_weight(c, n)\n updated_trust = g_copy_2.get_weight(c, n)\n self.assertGreaterEqual(initial_trust, updated_trust)\n","sub_path":"test/BribeNet/graph/temporal/test_multiBriberRatingGraph.py","file_name":"test_multiBriberRatingGraph.py","file_ext":"py","file_size_in_byte":3628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"310370617","text":"import json\nimport re\nimport time\n\nfrom lxml import etree\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom dbModels.Handle_mangodb import mango\n\n\nclass Handle_jobList():\n    def __init__(self):\n        chrome_option = Options()\n        chrome_option.add_argument('--headless')\n        self.driver = webdriver.Chrome(options=chrome_option)\n        self.url = \"https://search.51job.com/list/000000,000000,0106,00,9,99,+,2,1.html?ord_field=1\"\n        self.flag = True\n        self.num = {\n            \"area_li\":1,\n            \"worktype_tbody\":1,\n            \"worktype_td\": 1,\n            \"jobname_tr\": 1,\n            \"jobname_td\": 1,\n        }\n\n    # Get the job areas\n    def parse_area(self):\n        try:\n            self.driver.find_element_by_xpath(\"//div[@class='j_search_in']/div[@class='e_e e_com'][1]/p[@class='at']\").click()\n        except:\n            pass\n        html_one = etree.HTML(self.driver.page_source)\n        all_area = html_one.xpath(\"//div[@id='popop']//ul/li\")\n        if self.num[\"area_li\"] <= int(len(all_area)):\n            self.driver.find_element_by_xpath(\"//div[@id='popop']//ul/li[{0}]\".format(self.num[\"area_li\"])).click()\n            area_name = html_one.xpath(\"//div[@id='popop']//ul/li[{0}]/text()\".format(self.num[\"area_li\"]))[0].replace(\" \", \"\").replace(\"\\n\", 
\"\")\n self.parse_worktype(html_one, area_name)\n else:\n self.flag = False\n\n # 获取工作类型\n def parse_worktype(self,html_one,area_name):\n all_tbody = html_one.xpath(\"//div[@class='de d3']/div/table/tbody\")\n if self.num[\"worktype_tbody\"] <= int(len(all_tbody)):\n all_td = html_one.xpath(\"//div[@class='de d3']/div/table/tbody[{0}]/tr[1]/td[@class='js_more']\".format(self.num[\"worktype_tbody\"]))\n if self.num[\"worktype_td\"] <= int(len(all_td)):\n self.driver.find_element_by_xpath(\"//div[@class='de d3']/div/table/tbody[{0}]/tr[1]/td[@class='js_more'][{1}]/em\".format(self.num[\"worktype_tbody\"],self.num[\"worktype_td\"])).click()\n worktype= html_one.xpath(\"//div[@class='de d3']/div/table/tbody[{0}]/tr[1]/td[@class='js_more'][{1}]/em/text()\".format(self.num[\"worktype_tbody\"],self.num[\"worktype_td\"]))[0].replace(\" \",\"\").replace(\"\\n\",\"\")\n html_two = etree.HTML(self.driver.page_source)\n self.parse_jobname(html_two,worktype,area_name)\n else:\n self.num[\"worktype_td\"] = 1\n self.num[\"worktype_tbody\"] += 1\n else:\n self.num[\"worktype_tbody\"] = 1\n self.num[\"area_li\"] += 1\n\n # 获取岗位\n def parse_jobname(self,html_two,worktype,area_name):\n all_jobname_tr = html_two.xpath(\"//div[@class='de d3']/div/table/tbody/tr[2]/td/div/table/tbody/tr\")\n if self.num[\"jobname_tr\"] <= int(len(all_jobname_tr)):\n all_tr_td = html_two.xpath(\"//div[@class='de d3']/div/table/tbody/tr[2]/td/div/table/tbody/tr[{0}]/td[@class='js_more']\".format(self.num[\"jobname_tr\"]))\n if self.num[\"jobname_td\"] <= int(len(all_tr_td)):\n job_name = html_two.xpath(\"//div[@class='de d3']/div/table/tbody/tr[2]/td/div/table/tbody/tr[{0}]/td[@class='js_more'][{1}]/em/text()\".format(self.num[\"jobname_tr\"],self.num[\"jobname_td\"]))[0].replace(\" \", \"\").replace(\"\\n\", \"\")\n if job_name != \"所有\":\n try:\n self.driver.find_element_by_xpath(\"//div[@class='tin']/span[@class='ttag']\").click()\n except:\n pass\n self.driver.find_element_by_xpath(\"//div[@class='de d3']/div/table/tbody/tr[2]/td/div/table/tbody/tr[{0}]/td[@class='js_more'][{1}]/em\".format(self.num[\"jobname_tr\"],self.num[\"jobname_td\"])).click()\n self.driver.find_element_by_xpath(\"//div[@class='panel_lnp panel_py panel_ct2']/div[@class='but_box']/span\").click()\n self.driver.find_element_by_xpath(\"//div[@class='e_e e_but']/button\").click()\n time.sleep(1)\n self.driver.find_element_by_xpath(\"//span[@event-type='16']\").click()\n time.sleep(1)\n self.parse_data(worktype,area_name,job_name)\n self.num[\"jobname_td\"] += 1\n else:\n self.num[\"jobname_td\"] += 1\n else:\n self.num[\"jobname_td\"] = 1\n self.num[\"jobname_tr\"] += 1\n else:\n self.num[\"jobname_tr\"] = 1\n self.num[\"worktype_td\"] += 1\n\n # 获取岗位对应的编码\n def parse_data(self,worktype,area_name,job_name):\n now_url = self.driver.current_url\n self.driver.refresh()\n page_source = self.driver.page_source\n pattern_code = re.compile('https://search.51job.com/list/000000,000000,(.*?),00,9,99,')\n pattern_text = r\"window\\.__SEARCH_RESULT__ = (.*?)\\\"\n result = re.findall(pattern_text,page_source)[0]\n json_data = json.loads(result)\n info = {}\n # 工作编号\n info['job_code'] = int(pattern_code.search(now_url).group(1))\n # 工作名称\n info['job'] = job_name\n # 工作类型\n info['worktype'] = worktype\n # 工作领域\n info['area_name'] = area_name\n # 总页数\n info['total_page'] = json_data['total_page']\n # 总数据量\n info['jobid_count'] = json_data['jobid_count']\n # 采集时间\n info['crawl_time'] = time.strftime(\"%Y--%m--%d %H:%M:%S\", time.localtime(time.time()))\n # print(info)\n 
mango.mango_insert_data(\"job_list\",info)\n\n    # Main method\n    def main(self):\n        self.driver.maximize_window()\n        self.driver.get(self.url)\n        while self.flag:\n            self.parse_area()\n\n\nif __name__ == '__main__':\n    jobList = Handle_jobList()\n    jobList.main()\n","sub_path":"gei_jobList.py","file_name":"gei_jobList.py","file_ext":"py","file_size_in_byte":5892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"492289758","text":"import tkinter as tk\n\nwindow = tk.Tk()\nwindow.title(\"Compound interest formula\")\n\ntk.Label(window, text=\"Calculate\",\n         fg=\"white\", bg=\"black\").grid(row=1)\n\nprincipal = tk.StringVar()\nprincipal.set(\"100\")\n\ninterest_rate = tk.StringVar()\ninterest_rate.set(\"0.4\")\n\nnumber_of_compound_times = tk.StringVar()\nnumber_of_compound_times.set(\"2\")\n\ntotal_time = tk.StringVar()\ntotal_time.set(\"5\")\n\ndef compounded ():\n    p = float(principal.get())\n    r = float(interest_rate.get())\n    n = float(number_of_compound_times.get())\n    t = float(total_time.get())\n\n    amount = round(p*((1+(r/n))**(n*t)), 2)\n    tk.Label(window, text=\"Total amount: $\"+str(amount)).grid(row=10)\n    return\n\ntk.Label(window, text=\"Principal amount: \").grid(row=2)\ntk.Entry(window, textvariable=principal).grid(row=2, column=1)\n\ntk.Label(window, text=\"Interest rate: \").grid(row=3)\ntk.Entry(window, textvariable=interest_rate).grid(row=3, column=1)\n\ntk.Label(window, text=\"Number of times interest is compounded : \").grid(row=4)\ntk.Entry(window, textvariable=number_of_compound_times).grid(row=4, column=1)\n\ntk.Label(window, text=\"Total time: \").grid(row=5)\ntk.Entry(window, textvariable=total_time).grid(row=5, column=1)\n\ntk.Button(window, text=\"Calculate\", command=compounded).grid(row=8, column=1)\n\nwindow.mainloop()\n","sub_path":"calculate_compound_interest.py","file_name":"calculate_compound_interest.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"124472716","text":"#!/usr/bin/env python\n\n# This takes a podcast feed and parses it into a file.\n\n# PODCAST\n# UUID -> Number\n# Title -> String\n# Author -> String\n# Description -> String\n# Category -> String\n# Image -> String\n\n# EPISODE\n# Title -> String\n# Summary -> String\n# Published Date -> Date\n# Link -> String\n# Audio Link -> String\n\nimport csv\nimport requests\nimport argparse\nimport feedparser\nimport uuid\nimport string\n\n# Command-line arguments.\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-v\", \"--verbose\", help=\"increase output verbosity\",\n                    action=\"store_true\")\nparser.add_argument(\"-u\", \"--url\", help=\"URL to Podcast RSS Feed. 
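For the compound-interest GUI in the record above, a worked run of A = P*(1 + r/n)^(n*t) with the form's default inputs (P=100, r=0.4, n=2, t=5):

p, r, n, t = 100.0, 0.4, 2.0, 5.0
amount = round(p * ((1 + (r / n)) ** (n * t)), 2)  # same formula as compounded()
print(amount)  # 100 * 1.2**10 = 619.17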
\")\nparser.add_argument(\"-s\", \"--podcast_src\", help=\"Podcast list generated with podcast_list_creator.py\")\nargs = parser.parse_args()\n\n# Transforms title into a filename.\ndef namify(title):\n return title.lower().replace(\" \", \"_\")\n\n# Take parsed podcast and return dict with uuid, title, author, description,\n# category, and image.\ndef getPodcastAttributes(podcast, custom_uuid=None):\n # Get feed info.\n feed = podcast['feed']\n # Generate uuid\n if custom_uuid == None:\n id = uuid.uuid4()\n else:\n id = custom_uuid\n # Get podcast attributes.\n podcastAttributes = {}\n podcastAttributes['uuid'] = id\n podcastAttributes['title'] = feed['title']\n podcastAttributes['author'] = feed['author']\n podcastAttributes['description'] = feed['description']\n podcastAttributes['category'] = feed['category']\n podcastAttributes['image'] = feed['image']\n\n if args.verbose:\n for key, value in podcastAttributes.items():\n print(value)\n\n return podcastAttributes\n\n# Get podcast episodes for the parsed podcast.\ndef getPodcastEpisodes(uuid, podcast):\n # Resulting list of podcast episodes.\n podcastEpisodes = []\n # Get episodes.\n entries = podcast['entries']\n # Parse episode by episode.\n for entry in entries:\n try:\n episode = {}\n episode['uuid'] = uuid\n episode['title'] = entry['title']\n episode['summary'] = entry['summary']\n episode['published'] = entry['published']\n episode_links = entry['links']\n # links = []\n for link in episode_links:\n link_href = link['href']\n if link_href.find(\"mp3\") != -1:\n # print(\"Found MP3\")\n episode['links'] = link_href\n # links.append(link_href)\n # episode['links'] = links\n podcastEpisodes.append(episode)\n if args.verbose:\n for key, value in episode.items():\n print(value)\n except Exception:\n pass\n\n return podcastEpisodes\n\n# Main\ndef main():\n print(\"Starting Podcast Feed Parser...\")\n if args.url:\n # Generate one podcast.\n url = args.url\n print(f\"Checking Podcast URL: {url}\")\n # Get Podcasts\n podcast = feedparser.parse(url)\n podcastAttributes = getPodcastAttributes(podcast)\n podcastEpisodes = getPodcastEpisodes(podcastAttributes['uuid'], podcast)\n # Get Name\n name = podcastAttributes['title']\n podcastFilename = f'{namify(name)}_podcastMetdata.csv'\n episodesFilename = f'{namify(name)}_episodes.csv'\n with open(podcastFilename, 'w') as podcastFile, open(episodesFilename, 'w') as episodesFile:\n podcastWriter = csv.writer(podcastFile)\n episodeWriter = csv.writer(episodesFile)\n podcastWriter.writerow([value for key, value in podcastAttributes.items()])\n countEpisodes = 0\n for episode in podcastEpisodes:\n countEpisodes += 1\n episodeWriter.writerow([value for key, value in episode.items()])\n print(f\"Saved info about {countEpisodes} podcast episodes.\")\n podcastFile.close()\n episodesFile.close()\n elif args.podcast_src:\n print(f\"Checking Podcast File {args.podcast_src}\")\n episodesFilename = 'episodeList.csv'\n countPodcasts = 0\n countEpisodes = 0\n with open(args.podcast_src, 'rt') as podcastList, open(episodesFilename, 'w') as episodesFile:\n podcastListReader = csv.reader(podcastList)\n episodeWriter = csv.writer(episodesFile)\n podcasts = list(podcastListReader)\n for podcast in podcasts:\n print(f\"Now parsing podcast: {podcast[1]}\")\n url = podcast[10]\n id = podcast[0]\n # print(id)\n # print(url)\n try:\n podcast = feedparser.parse(url)\n countPodcasts += 1\n podcastEpisodes = getPodcastEpisodes(id, podcast)\n for episode in podcastEpisodes:\n countEpisodes += 1\n 
episodeWriter.writerow([value for key, value in episode.items()])\n except Exception:\n print(f\"Passed podcast: {podcast[1]}\")\n pass\n print(f\"Saved info about {countPodcasts} podcasts.\")\n print(f\"Saved info about {countEpisodes} podcast episodes.\")\n episodesFile.close()\n podcastList.close()\n else:\n print(\"ERROR: No URL or PODCAST_SRC specified.\")\n\n\nif __name__ == \"__main__\":\n\n # calling main function\n main()\n","sub_path":"podcast_feed_parser.py","file_name":"podcast_feed_parser.py","file_ext":"py","file_size_in_byte":5405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"504771075","text":"#!/usr/bin/env python\nimport pytest\nimport mozloc\nimport datetime\nimport os\n\nCI = bool(os.environ['CI']) if 'CI' in os.environ else False\n\n\n@pytest.mark.skipif(CI, reason=\"CI doesn't have WiFi\")\ndef test_nm_loc():\n loc = mozloc.get_nmcli()\n assert isinstance(loc, dict)\n assert isinstance(loc['t'], datetime.datetime)\n\n\n@pytest.mark.skipif(CI, reason=\"CI doesn't have WiFi\")\ndef test_nm_connection():\n mozloc.nm_config_check()\n\n\nif __name__ == '__main__':\n pytest.main(['-xrsv', __file__])\n","sub_path":"tests/test_all.py","file_name":"test_all.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"358338007","text":"\"\"\"Final URL Configuration\r\n\r\nThe `urlpatterns` list routes URLs to views. For more information please see:\r\n https://docs.djangoproject.com/en/3.2/topics/http/urls/\r\nExamples:\r\nFunction views\r\n 1. Add an import: from my_app import views\r\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\r\nClass-based views\r\n 1. Add an import: from other_app.views import Home\r\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\r\nIncluding another URLconf\r\n 1. Import the include() function: from django.urls import include, path\r\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\r\n\"\"\"\r\nfrom django.contrib import admin\r\nfrom django.urls import path, include\r\nfrom Tienda import views\r\nfrom Tienda.views import registro, agregar, modificar, eliminar, acerca\r\n\r\n\r\nurlpatterns = [\r\n path('admin/', admin.site.urls),\r\n path('', views.index, name='index'),\r\n path('/',views.detalle,name='detalle'),\r\n path('carrito/',views.order,name='order'),\r\n path('filtro_secciones/', views.filtro_secciones, name=\"filtro_secciones\"),\r\n path('accounts/',include('django.contrib.auth.urls')),\r\n path('registro/', registro, name=\"registro\"),\r\n path('agregar/', agregar, name=\"agregar\"),\r\n path('modificar//', modificar, name=\"modificar\"),\r\n path('eliminar//', eliminar, name=\"eliminar\"),\r\n path('acerca/', acerca, name=\"acerca\"),\r\n\r\n]\r\n","sub_path":"Final/Final/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"291788962","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
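A minimal feedparser round trip showing the keys the podcast parser above depends on. The URL is a placeholder, and real feeds may omit any of these fields, hence the .get() calls:

import feedparser

d = feedparser.parse('https://example.com/feed.rss')  # placeholder URL
print(d.feed.get('title'))
for entry in d.entries[:3]:
    print(entry.get('title'), entry.get('published'))
    for link in entry.get('links', []):
        if 'mp3' in link.get('href', ''):  # same audio-link test as above
            print('audio:', link['href'])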
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom flask import Flask\nimport os\nfrom oslo_config import cfg\nfrom python_nemesis.config import collect_sqlalchemy_opts\nfrom python_nemesis.config import register_opts\nfrom python_nemesis.extensions import db\nfrom python_nemesis.extensions import keystone\nfrom python_nemesis.extensions import log\n\n\ndef configure_blueprints(app, blueprints):\n \"\"\"Register configured blueprints into app object.\n\n :param app: The application object to which configuration should\n be applied.\n :type app: :py:class:`flask.Flask`\n :param blueprints: list of blueprints to be registered.\n :type blueprints: list(:py:class:`flask.Blueprint`)\n \"\"\"\n for blueprint in blueprints:\n app.register_blueprint(blueprint)\n\n\ndef configure_app(app):\n \"\"\"Retrieve App Configuration.\n\n configure_app first loads default configuration and then attempts\n to override defaults using a file specified in env:NEMESIS_CONFIG.\n\n :param app: The application object to which configuration should\n be applied.\n :type app: :py:class:`flask.Flask`\n \"\"\"\n app.config.from_object('python_nemesis.default_config')\n app.config[\"cfg\"] = cfg.CONF\n config_file = os.environ.get(\n \"NEMESIS_CONFIG\",\n \"/etc/nemesis/nemesis.conf\")\n\n register_opts(app.config[\"cfg\"], config_file)\n collect_sqlalchemy_opts(app, app.config[\"cfg\"])\n\n\ndef configure_extensions(app):\n \"\"\"Initialize extensions for Flask.\n\n This function is intended for use with the app factory style\n of Flask deployment.\n\n :param app: The application object to which configuration should\n be applied.\n :type app: :py:class:`flask.Flask`\n \"\"\"\n db.init_app(app)\n log.init_app(app)\n keystone.init_app(app)\n\n\ndef create_app(app_name=None, blueprints=None):\n \"\"\"Create the flask app.\n\n This function is intended to be used with the app factory\n style of Flask deployment.\n\n :param str app_name: Name to be used internally within flask.\n :param blueprints: Blueprints to be registered.\n :type blueprints: list(:py:class:`flask.Blueprint`)\n :returns: The created app.\n :rtype: :py:class:`flask.Flask`\n \"\"\"\n app = Flask(app_name)\n\n configure_app(app)\n configure_extensions(app)\n\n # Here we register the application blueprints.\n from python_nemesis.api.v1 import V1_API\n blueprints = [V1_API]\n configure_blueprints(app, blueprints)\n\n return app\n\n\nif __name__ == \"__main__\": # pragma: no cover\n app = create_app('nemesis-api')\n app.run(threaded=True)\n","sub_path":"python_nemesis/base_app.py","file_name":"base_app.py","file_ext":"py","file_size_in_byte":3096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"64630034","text":"# %% \nimport numpy as np\nimport matplotlib.pyplot as plt\n# %%\n# Constants\npi = 3.14159\nx = np.linspace(0,20,20)\n\n# %%\ndef func_y(x):\n y = 0.5*x + x*np.sin(2*pi*x/10)\n return (y)\n\n# %%\n# 1a) Plot\ny = func_y(x)\nplt.plot(x,y)\nplt.xlabel('x')\nplt.ylabel('y')\nplt.title('Part 1a')\nplt.show()\n\n# %%\n# 1b) Integrate\nx_int = np.linspace(0,20,20000)\ny_int = func_y(x_int)\ndelx = 20/20000\n\nI = 0\nfor i in range (20000):\n I = delx*y_int[i] + I\nprint('Part 1b) I=',I)\n\n# %%\n# 1b) Plot\nplt.plot(x_int, y_int)\nplt.xlabel('x')\nplt.ylabel('y')\nplt.title('Part 1b')\nplt.show()\n\n# %%\n# 1c) Gaussian quadrature\n\n# i) n = 2\nn_i = 2\nGammak_i = 0.5773502692\nwk_i = 1\na = 0 #?\nb = pi/2 #?\n\nxk_i = [(b+a)/2 + 
((b-a)/2)*Gammak_i,(b+a)/2 + ((b-a)/2)*(-1)*Gammak_i]\nsum_m = 0\n\nfor i in range(n_i):\n sum_m = wk_i*func_y(xk_i[i]) + sum_m\n\nintegral_ab = ((b-a)/2)*sum_m\nprint('Part 1ci) I = ',integral_ab)\n\n# %%\n# 1c) Gaussian quadrature TAKE 2\n\n# i) n = 2\nn = [2,4,6,8]\nn = np.array(n)\nGammak_i = [[0.5773502692],\n [0.3399810436, 0.8611363116],\n [0.2386191861,0.6612093865,0.9324695142],\n [0.1834346425,0.5255324099, 0.7966664774, 0.9602898565]]\n\nGammak_i = np.matrix(Gammak_i)\n\nwk_i = [[1],\n [0.6521451549, 0.3478548451],\n [0.4679139346, 0.3607615730, 0.1713244924],\n [0.3626837834, 0.3137066459, 0.2223810345, 0.1012285362]]\nwk_i = np.matrix(wk_i)\n\na = 0 #?\nb = pi/2 #?\n\n\nsum_m = 0\nxk = np.zeros((len(n),n[-1]))\n\n#j is looping through the maximum amount of spots which doesnt work for\n# every row \n\n# NEEDS FIXING\n\nfor i in range(int(len(n))-1):\n for j in range(len(Gammak_i[i,:])):\n if j == 0 or j==2 or j==4 or j==6:\n xk[i,j] = (b+a)/2 + ((b-a)/2)*(Gammak_i[i,j])\n elif j == 1 or j==3 or j==5 or j==7:\n xk[i,j] = (b+a)/2 + ((b-a)/2)*(-1)*Gammak_i[i,j]\n\nprint(xk)\n\nfor i in range(len(n)):\n for j in range(int(n[i]/2)):\n sum_m = wk_i*func_y(xk[i,j]) + sum_m\n \n\nintegral_ab = ((b-a)/2)*sum_m\nprint(integral_ab)\n\n","sub_path":"HW10/HW10_v1.py","file_name":"HW10_v1.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"421638444","text":"#!/usr/bin/env python3\n\n\"\"\"\nYou are given a dictionary of an arbitrarily nesting level.\n\nWrite a method which will fold multiple-level nested dictionary\ninto an object with attributes like follows:\n\nprint(result['users.17.age']) # 23\nprint(result['groups.6']) # Boulder\n\nIt is desirable to write solutions using cycles and recursion and\nexplain pros and cons of both.\n\"\"\"\n\nimport pprint\n\n\nEXAMPLE = {\n 'users': {\n 17: {\n 'age': 23,\n 'name': 'Domen Skofic'\n },\n 23: {\n 'age': 26,\n 'name': 'Jakob Schubert'\n }\n },\n 'groups': {\n 12: 'Lead',\n 6: 'Boulder'\n }\n}\n\n# Below are two function-based solutions.\n# Return unfolded dictionaries all at once.\n\ndef unfold_iter(original_dict):\n \"\"\"Unfold nested dictionary using iteration\"\"\"\n\n # create a modified copy of the original dictionary\n resulting_dict = {str(key):value for key, value in original_dict.items()}\n unverified_items = [str(key) for key in original_dict]\n\n # Below C-style iteration solution is necessary because it's\n # not safe to modify an object while iterating through it\n i = 0\n while i < len(unverified_items):\n key = unverified_items[i]\n if hasattr(resulting_dict[key], 'items'):\n for nkey, nvalue in resulting_dict[key].items():\n resulting_dict[key+'.'+str(nkey)] = nvalue\n unverified_items.append(key+'.'+str(nkey))\n del resulting_dict[key]\n i += 1\n return resulting_dict\n\n\ndef unfold_recur(original_dict):\n \"\"\"Unfold nested dictionary using recursion\"\"\"\n\n resulting_dict = {}\n for key, value in original_dict.items():\n try:\n resulting_dict.update(\n {str(key)+'.'+str(nkey):nvalue for nkey, nvalue in unfold_recur(value).items()}\n )\n except AttributeError:\n resulting_dict[str(key)] = value\n\n return resulting_dict\n\n\n# Below are two class-based solutions.\n# Store the original dictionary as is and return nested values on the fly\n\nclass UnfoldIter():\n \"\"\"Unfold nested dictionary using iteration\"\"\"\n def __init__(self, data):\n self.data = data\n\n def __getitem__(self, key):\n value = self.data\n for nkey 
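The second quadrature attempt in the record above is flagged "# NEEDS FIXING". A minimal corrected sketch using numpy's built-in Gauss-Legendre nodes and weights, with the same integrand and the same [a, b] node mapping xk = (b+a)/2 + ((b-a)/2)*Gammak:

import numpy as np

def f(x):
    return 0.5 * x + x * np.sin(2 * np.pi * x / 10)

a, b = 0.0, np.pi / 2
for n in (2, 4, 6, 8):
    g, w = np.polynomial.legendre.leggauss(n)  # nodes/weights on [-1, 1]
    x = (b + a) / 2 + (b - a) / 2 * g          # map the nodes onto [a, b]
    print(n, (b - a) / 2 * np.sum(w * f(x)))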
in key.split('.'):\n value = self.get_value(value, nkey)\n return value\n\n def get_value(self, dictionary, key):\n \"\"\"Get value from dictionary no matter if key is str or not\"\"\"\n try:\n return dictionary[key]\n except KeyError:\n # integer keys, for example\n return dictionary[eval(key)]\n\n\nclass UnfoldRecur(UnfoldIter):\n \"\"\"Unfold nested dictionary using recursion\"\"\"\n def __init__(self, data):\n # ndata will be a mutable storage for recursion\n self.ndata = data\n UnfoldIter.__init__(self, data)\n\n def __getitem__(self, key):\n if '.' not in key:\n result = self.get_value(self.ndata, key)\n self.ndata = self.data #Clean before returning\n return result\n else:\n nkey = key.split('.')\n key = nkey.pop(0)\n nkey = '.'.join(nkey)\n self.ndata = self.get_value(self.ndata, key)\n return self.__getitem__(nkey)\n\n\nif __name__ == '__main__':\n print('\\nFunction-based solutions')\n print('\\nIteration function result:\\n')\n pprint.pprint(unfold_iter(EXAMPLE))\n print('\\nRecursive function result:\\n')\n pprint.pprint(unfold_recur(EXAMPLE))\n print('\\nBelow are class-based solutions')\n print('\\nIteration object:\\n')\n RESULT_ITER = UnfoldIter(EXAMPLE)\n print(RESULT_ITER['users.17.age'])\n print(RESULT_ITER['groups.6'])\n print('\\nRecursion object:\\n')\n RESULT_RECUR = UnfoldRecur(EXAMPLE)\n print(RESULT_RECUR['users.17.age'])\n print(RESULT_RECUR['groups.6'])\n","sub_path":"test_task_jera/3/fold_nested_dict.py","file_name":"fold_nested_dict.py","file_ext":"py","file_size_in_byte":3854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"65267739","text":"import numpy as np\nimport os\nimport sys\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport re\nimport seaborn as sns\n\n\nsns.set()\nsns.set_style(\"whitegrid\", {'axes.grid' : True})\nsns.set_color_codes()\nsns.set_context(\"notebook\", font_scale=1.5, rc={\"lines.linewidth\": 2.5, 'lines.markeredgewidth': 1., 'lines.markersize': 10})\ncolors = [\"b\", \"g\", \"r\", \"y\", \"m\"]\nfont = {'family' : 'serif'}\nmpl.rc('font', **font)\nmpl.rcParams['ps.useafm'] = True\nmpl.rcParams['pdf.use14corefonts'] = True\n\n\n\ncols=['tiled','tile_size','data_type','matrix_width','zfp_rate','cache_size','is_zfp',\n\t'Iteration','Time','RMSE']\n\n\ndf = pd.read_csv(\"matmatmult_df.csv\")\ndf['FLOPS']= (2 * df['matrix_width']**3) / (df['Time'])\ndf['Megaflops']=df['FLOPS']/2**20\n\n\n\n#computes best possible cache_size for each configuration\nzfp_df = df.loc[(df['is_zfp'] == True) & (df['data_type'] == 'double') & (df['matrix_width'] <= 2048) & (df['Time'] > 0)]\nbest_runs_df=pd.DataFrame(columns=['tile_size','matrix_width','zfp_rate','cache_size','Time'])\nfor tile_size in zfp_df['tile_size'].unique():\n\tfor matrix_width in zfp_df['matrix_width'].unique():\n\t\tfor zfp_rate in zfp_df['zfp_rate'].unique():\n\t\t\tindex = zfp_df.loc[(tile_size == zfp_df['tile_size']) & (matrix_width == zfp_df['matrix_width']) & (zfp_rate == zfp_df['zfp_rate'])]\n\t\t\tindex = index.groupby(['tile_size','matrix_width','zfp_rate','cache_size'], as_index=False)['Time'].mean()\n\t\t\tmin_time=index['Time'].min()\n\t\t\tbest_run=index.loc[index['Time'] == min_time]\n\t\t\tbest_runs_df=best_runs_df.append(best_run, ignore_index=True)\n#print(best_runs_df)\n\nbest_unique_times_df = best_runs_df[['tile_size','matrix_width','zfp_rate','Time']].drop_duplicates()\nprint(best_unique_times_df)\nzeroCache_times_df=df.loc[(df['is_zfp'] == True) & 
(df['data_type'] == 'double') & (df['matrix_width'] <= 2048) & (df['Time'] > 0) & (df['cache_size'] == 0)][['tile_size','matrix_width','zfp_rate','Time']]\nzeroCache_times_df=zeroCache_times_df.groupby(['tile_size','matrix_width','zfp_rate'], as_index=False)['Time'].mean()\nprint(zeroCache_times_df)\n\n# i will need to figure out these later, for now I loop\n#allVals = pd.concat([best_unique_times_df,zeroCache_times_df],join='inner',join_axes=['tile_size','matrix_width','zfp_rate'])\n#allVals = pd.merge(best_unique_times_df,zeroCache_times_df,how='left',left_on=['tile_size','matrix_width','zfp_rate','Time'], right_on=['tile_size','matrix_width','zfp_rate','Time'])\nallVals = pd.DataFrame(columns=['tile_size','matrix_width','zfp_rate','Best Time', 'Default Cache Time'])\nfor index, row in best_unique_times_df.iterrows():\n\tallVals = allVals.append({\n\t\t'tile_size':row['tile_size'],\n\t\t'matrix_width':row['matrix_width'],\n\t\t'zfp_rate':row['zfp_rate'],\n\t\t'Best Time':row['Time'],\n\t\t'Default Cache Time':zeroCache_times_df[(zeroCache_times_df['tile_size'] == row['tile_size']) & (zeroCache_times_df['matrix_width'] == row['matrix_width']) & (zeroCache_times_df['zfp_rate'] == row['zfp_rate'])]['Time'].values[0]\n\t},ignore_index=True)\nprint(allVals)\nallVals['Speedup'] = allVals['Default Cache Time'] / allVals['Best Time']\nprint(allVals)\n\nfor rate in [4, 8, 16, 32, 48]:\n\tfig=sns.lineplot(x='matrix_width',y='Speedup',data=allVals[(allVals['zfp_rate'] == rate) & (allVals['matrix_width'] <= 600) ], ci='sd', label=str(rate))\nplt.ylabel('Speedup over Default ZFP')\nplt.xlabel('n')\nplt.legend(title=\"ZFP Rate\")\nplt.tight_layout()\nplt.savefig(\"images/Speedup.pdf\")\n\n#q = df.loc[(df['is_zfp'] == True) & (df['data_type'] == 'double') & (df['matrix_width'] == 2048) & (df['zfp_rate'] == 8)]\n#print(q)\n\n# RUNTIME VS PS for rate w/ reasonably low rmse (below 48)\n# 1. characterize default performance\n# 2. tuning determine run with lowest execution on a per accuracy basis\n# 3. 
determine performance difference between 1 and 2.\n# Need Paper-Ready Figures by Monday.\n\n\n\n\n\t\n\n\n","sub_path":"speedup_plotter.py","file_name":"speedup_plotter.py","file_ext":"py","file_size_in_byte":3868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"568772774","text":"\"\"\"\nDjango settings for people project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.6/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.6/ref/settings/\n\"\"\"\n\nimport django.conf.global_settings as DEFAULT_SETTINGS\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get('DJANGO_SECRET_KEY')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = bool(os.environ.get('DJANGO_DEBUG', ''))\nTEMPLATE_DEBUG = DEBUG\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'people',\n 'avatar',\n 'south'\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nTEMPLATE_CONTEXT_PROCESSORS = DEFAULT_SETTINGS.TEMPLATE_CONTEXT_PROCESSORS + (\n 'people.context_processors.segment_io',\n)\n\nROOT_URLCONF = 'people.urls'\n\nWSGI_APPLICATION = 'people.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.6/ref/settings/#databases\n# Parse database configuration from $DATABASE_URL\nimport dj_database_url\nDATABASES = {\n 'default': dj_database_url.config()\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.6/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Heroku configuration.\n# See https://devcenter.heroku.com/articles/getting-started-with-django#settings-py.\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.6/howto/static-files/\n\nSTATIC_URL = '/static/'\n\n# Honor the 'X-Forwarded-Proto' header for request.is_secure()\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n\n# Allow all host headers\nALLOWED_HOSTS = ['*']\n\n# Static asset configuration\nimport os\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nSTATIC_ROOT = 'staticfiles'\nSTATIC_URL = '/static/'\n\nSTATICFILES_DIRS = (\n os.path.join(BASE_DIR, 'static'),\n)\n\n# User profile\nAUTH_PROFILE_MODULE = 'people.UserProfile'\n\n# None - Segment.io integration disabled\nSEGMENT_IO_API_WRITE_KEY = os.environ.get('SEGMENT_IO_API_WRITE_KEY', None)\n\nLOGIN_REDIRECT_URL = '/'\n\n# Sendgrid\nEMAIL_HOST_USER = os.environ['SENDGRID_USERNAME']\nEMAIL_HOST= 'smtp.sendgrid.net'\nEMAIL_PORT = 587\nEMAIL_USE_TLS = True\nEMAIL_HOST_PASSWORD = os.environ['SENDGRID_PASSWORD']\n\n# Request to join\nREQUEST_TO_JOIN_EMAILS = os.environ.get('REQUEST_TO_JOIN_EMAILS', 
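The row-by-row `allVals` build in the record above (the part marked "i will need to figure out these later, for now I loop") can be replaced with a single key-based merge. A sketch on stand-in frames with the same column names:

import pandas as pd

keys = ['tile_size', 'matrix_width', 'zfp_rate']
best = pd.DataFrame([[32, 512, 8, 1.0]], columns=keys + ['Time'])
zero = pd.DataFrame([[32, 512, 8, 2.5]], columns=keys + ['Time'])

# suffixes disambiguate the overlapping non-key 'Time' columns
allvals = best.merge(zero, on=keys, suffixes=('_best', '_default'))
allvals['Speedup'] = allvals['Time_default'] / allvals['Time_best']
print(allvals)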
'').split(',')\n\nABSOLUTE_URL_OVERRIDES = {\n 'auth.user': lambda o: \"/%s/\" % o.username,\n}","sub_path":"people/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"501190931","text":"from PyQt5 import QtCore, QtGui, QtWidgets\n\nimport time\n\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(325, 237)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.label = QtWidgets.QLabel(self.centralwidget)\n self.label.setGeometry(QtCore.QRect(110, 20, 61, 16))\n self.label.setObjectName(\"label\")\n self.textEdit = QtWidgets.QTextEdit(self.centralwidget)\n self.textEdit.setGeometry(QtCore.QRect(90, 60, 104, 71))\n self.textEdit.setObjectName(\"textEdit\")\n self.pushButton = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton.setGeometry(QtCore.QRect(100, 150, 75, 23))\n self.pushButton.setObjectName(\"pushButton\")\n MainWindow.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(MainWindow)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 325, 21))\n self.menubar.setObjectName(\"menubar\")\n MainWindow.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"test window\"))\n self.label.setText(_translate(\"MainWindow\", \"pyqt5 tests\"))\n self.pushButton.setText(_translate(\"MainWindow\", \"test button\"))\n self.pushButton.clicked.connect(self.label_change)\n self.thread_start = MyThread()\n self.thread_start.ard_signal.connect(self.label.setText)\n self.thread_start.start()\n\n def label_change(self):\n self.pushButton.setText('Button Clicked!')\n self.textEdit.setText('taco')\n\n\nclass MyThread(QtCore.QThread):\n ard_signal = QtCore.pyqtSignal(str)\n\n def __init__(self):\n QtCore.QThread.__init__(self)\n\n def run(self):\n while 1:\n time.sleep(3)\n self.ard_signal.emit('some string')\n\n\ndef process_signal(result):\n print(result)\n sys.exit()\n\n\nif __name__ == \"__main__\":\n import sys\n\n app = QtWidgets.QApplication(sys.argv)\n MainWindow = QtWidgets.QMainWindow()\n ui = Ui_MainWindow()\n ui.setupUi(MainWindow)\n MainWindow.show()\n sys.exit(app.exec_())","sub_path":"thread/tt.py","file_name":"tt.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"294614597","text":"#this creates a temp object\nmyfile=open(\"fruits.txt\")\n#this is how you read it\na=myfile.read()\nprint(type(a))\nprint(myfile.read())\nmyfile.close()\n#cursor moves down after reading once so repeating the read print does not double print the file only prints once\n\n# this is better file is only opened under indentation\n\nwith open(\"fruits.txt\") as aa:\n content=aa.read()\nprint(content)\n\n#different filepaths\n\nwith open(\"lmao/newfroots.txt\") as aa:\n content=aa.read()\nprint(content)\n\n\n\n################################################\n#writing\nwith open(\"lmao/anotherdoc.txt\", \"w\") as writt:\n writt.write(\"Bro\\nIam\\nNot\\nDoing\\nWell\\n\")\n\n\n#def foo(character, 
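One caveat about `DEBUG = bool(os.environ.get('DJANGO_DEBUG', ''))` in the settings record above: any non-empty string is truthy, so DJANGO_DEBUG=False still enables debug mode. A safer parse, assuming a conventional set of true-ish values:

import os

def env_bool(name, default=False):
    raw = os.environ.get(name)
    if raw is None:
        return default
    # bool('False') is True, so compare against an explicit whitelist
    return raw.strip().lower() in ('1', 'true', 'yes', 'on')

print(env_bool('DJANGO_DEBUG'))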
filepath=\"bear.txt\"):\n# file = open(filepath)\n# content = file.read()\n# return content.count(character)\n\n\n\n# How to write on a preexisting file \n\nwith open(\"fruits.txt\", \"a+\") as myfile:\n myfile.write(\"\\nOkra\")\n myfile.seek(0)\n #put cursor at top after reading fruits\n content=myfile.read()\n\n\nprint(content)","sub_path":"Not_app_code/the_basics/readwrite.py","file_name":"readwrite.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"426464553","text":"from BackEnd.InstanceProblem.CVRP import _ACO, _GA, _SA\nfrom BackEnd.InstanceProblem.INSTANCE import Instance\nimport time\n\n\ndef execute_some_method(problem, method, parameters):\n instance = Instance()\n instance.load_instance(problem)\n x = time.time()\n if method == 'Ant Colony Optimization':\n path, routes, cost, fitness_list = _ACO.ACO(parameters).solve(instance)\n elif method == 'Genetic Algorithm':\n path, routes, cost, fitness_list = _GA.GA(parameters).solve(instance)\n else:\n path, routes, cost, fitness_list = _SA.SA(parameters).solve(instance)\n return [(time.time() - x), path, routes, cost, instance.node_coord, fitness_list]\n\n\n\n","sub_path":"BackEnd/InstanceProblem/CVRP/execute_vrp.py","file_name":"execute_vrp.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"224350033","text":"#!/usr/bin/env python3\nfrom collections import defaultdict\n\n# Reading input\nN = int(input())\ntreasures = list(map(int, input().split()))\nbridges = [tuple(map(int, input().split()))\n for _ in range(N-1)]\n\n# Constructing graph\nrelationships = defaultdict(list)\nfor (n1, n2, s) in bridges:\n relationships[n1].append((n2, s))\n relationships[n2].append((n1, s))\n\n# Constructing tree\nchildren = {}\n\n\ndef add_children(children, node, parent):\n \"\"\"Recursively add children for a given node.\n \"\"\"\n children[node] = []\n for (other, strength) in relationships[node]:\n if other != parent:\n children[node].append((other, strength))\n add_children(children, other, node)\n\n\nadd_children(children, 0, 0)\n\n# Find the maximal amount of gold we can get\n\n\ndef most_gold(children, node, limit):\n \"\"\"Recursively get the biggest amount of gold we can get at a given node\n \"\"\"\n gold_from_each = [\n most_gold(children, other, strength)\n for (other, strength) in children[node]\n ] + [0]\n income = sum(gold_from_each) + treasures[node]\n if limit is not None:\n return min(limit, income)\n else:\n return income\n\n\nprint(most_gold(children, 0, None))","sub_path":"cb 2018/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"382650777","text":"from starlette.applications import Starlette\nfrom starlette.middleware.cors import CORSMiddleware\nfrom starlette.responses import JSONResponse\nfrom starlette.config import Config\nimport uvicorn\nimport os\nfrom fastai import *\nfrom fastai.vision import *\nimport urllib\nfrom io import BytesIO\nimport aiohttp\n\nasync def get_bytes(url):\n async with aiohttp.ClientSession() as session:\n async with session.get(url) as response:\n return await response.read()\n\napp = Starlette(debug=True)\n\napp.add_middleware(CORSMiddleware, allow_origins=['*'], allow_headers=['*'], allow_methods=['*'])\n\n### EDIT CODE BELOW ###\n\nanswer_question_1 = \"\"\"\nUnderfitting: if our 
training data is not enough we will not be able to generalize, that means that our model will not be able to predict due to the lack of information\nOverfitting: its when our model learns a bunch of particular cases that we provide but its incapable of recognizing new slightly different data\n\n\"\"\"\n\nanswer_question_2 = \"\"\"\nWe use it when the loss function is non-linear, and allow us to find the lowest value of a function. How it works: before anything the algorithm choose a random value of the\nfunction and with every iterarion and using the gradient to know in wich direction to proced and learning rate to get magnitude of the step, updates the function so we get\ncloser to the lowest point.\n\"\"\"\n\nanswer_question_3 = \"\"\"\nThe main goal of regression is to analyze and predict variables using more variables. training with datasets as a start to predict continuous-valued output\n\"\"\"\n\n\nmodel_50 = load_learner('.', file='export.pkl')\n\n@app.route(\"/api/answers_to_hw\", methods=[\"GET\"])\nasync def answers_to_hw(request):\n return JSONResponse([answer_question_1, answer_question_2, answer_question_3])\n\n@app.route(\"/api/class_list\", methods=[\"GET\"])\nasync def class_list(request):\n return JSONResponse([ 'nes','supern','n64','gamecube' ])\n\n@app.route(\"/api/classify\", methods=[\"POST\"])\nasync def classify_url(request):\n body = await request.json()\n url_to_predict = body[\"url\"]\n\n bytes = await get_bytes(url_to_predict)\n img = open_image(BytesIO(bytes))\n preds, _, _ = model_50.predict(img)\n\n return JSONResponse({\n \"predictions\": str(preds),\n })\n\nif __name__ == '__main__':\n uvicorn.run(app, host='0.0.0.0', port=int(os.environ['PORT']))\n","sub_path":"Game Clasifier/py/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"39471992","text":"#Given: an array containing hashes of names\r\n\r\n#Return: a string formatted as a list of names separated by commas except for the last two names, which should be separated by an ampersand.\r\n\r\n#Example:\r\n\r\n#namelist([ {'name': 'Bart'}, {'name': 'Lisa'}, {'name': 'Maggie'} ])\r\n# returns 'Bart, Lisa & Maggie'\r\n\r\n#namelist([ {'name': 'Bart'}, {'name': 'Lisa'} ])\r\n# returns 'Bart & Lisa'\r\n\r\n#namelist([ {'name': 'Bart'} ])\r\n# returns 'Bart'\r\n\r\n#namelist([])\r\n# returns ''\r\n\r\n#Note: all the hashes are pre-validated and will only contain A-Z, a-z, '-' and '.'.\r\n\r\ndef namelist(names):\r\n b = \"\"\r\n i = 0\r\n if len(names) == 0:\r\n return ''\r\n elif len(names) < 2:\r\n return names[0]['name']\r\n elif len(names) == 2:\r\n return names[0]['name'] + ' ' + '&' + ' ' + names[1]['name']\r\n elif len(names) > 2:\r\n while i < (len(names) - 1):\r\n b = b + names[i]['name'] + ',' + ' '\r\n i += 1\r\n return b[:-2] + ' ' + '&' + ' ' + names[len(names)-1]['name']\r\n","sub_path":"Format a string of names like 'Bart, Lisa & Maggie'..py","file_name":"Format a string of names like 'Bart, Lisa & Maggie'..py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"325460091","text":"# This is a function that looks like a decorator but used by AutoSynchronized to\n# inject a _auto_lock member on every instance of a class that uses our metaclass.\n# This way we don't have to think about locking on methods.\nfrom threading import Lock\nimport sys\nimport glob\n\ndef 
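answer_question_2 above describes gradient descent in prose; a minimal numeric illustration on the toy function f(x) = (x - 3)**2, whose gradient is 2*(x - 3) (not taken from the model code):

x, lr = 0.0, 0.1
for _ in range(50):
    grad = 2 * (x - 3)  # derivative of (x - 3)**2
    x -= lr * grad      # step against the gradient, scaled by the learning rate
print(round(x, 4))      # converges toward the minimum at x = 3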
wrap_init_with_lock(org_init):\n    def wrapped_init(self, *args, **kwargs):\n        org_init(self, *args, **kwargs)\n        ports = _serial_ports()\n        serial_locks = {}\n        for serial in ports:\n            serial_locks[serial] = Lock()\n        self._auto_locks = serial_locks\n    return wrapped_init\n\ndef _serial_ports():\n    \"\"\"Lists serial ports\n    :raises EnvironmentError:\n        On unsupported or unknown platforms\n    :returns:\n        A list of available serial ports\n    \"\"\"\n    if sys.platform.startswith('win'):\n        ports = ['COM' + str(i + 1) for i in range(256)]\n    elif sys.platform.startswith('linux') or sys.platform.startswith('linux2') or sys.platform.startswith('cygwin'):\n        ports = glob.glob('/dev/ttyACM*')\n    elif sys.platform.startswith('darwin'):\n        ports = glob.glob('/dev/tty.usbmodem*')\n    else:\n        raise EnvironmentError('Unsupported platform')\n    return ports\n\nclass Allocate_port_locks(type):\n    \"\"\"\n    This is a metaclass, a class describing how classes should be built. This\n    new metaclass wraps the init method with a new version that will add a lock\n    object to self.\n\n    It then provides a hook so that calling a method and prepending 'synchronized_'\n    to the method name will obtain the injected lock before calling the method and \n    release it when leaving the method. No further work is required other than calling\n    the method.\n    \"\"\"\n    def __init__(cls, name, bases, namespaces):\n        super(type, cls).__init__(name, bases, namespaces)\n        cls.__init__ = wrap_init_with_lock(cls.__init__)\n","sub_path":"synchronize.py","file_name":"synchronize.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"283819504","text":"# Create a function that takes a filename and a string as parameter,\n# And writes the string got as second parameter into the file 10 times.\n# If the writing succeeds, the function should return True.\n# If any problem arises with the file output, the function should not break, but return False.\n# Example: when called with the following two parameters: \"tree.txt\", \"apple\",\n# the function should write \"appleappleapple\" to the file \"tree.txt\", and return True.\n\ndef write_to_file(file_name, string):\n    try:\n        fr = open(file_name, \"w\")\n        fr.write(string * 10)\n        fr.close()\n        return True\n    except:\n        return False\n\nprint(write_to_file(\"second_tree.txt\", \"apple\"))\n","sub_path":"exam python/second.py","file_name":"second.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"235933811","text":"import sys\nimport json\nimport lcddriver\n\nlcd = lcddriver.lcd()\nlcdStr1 = \"SecSys Armed\"\nlcdStr2 = \"Detected: \"\nlcdWarn = \"Motion detected\"\n\nsrc = \"values.json\"\n\ndef main():\n\tif sys.argv[1] == \"1\":\n\t\tsrcFile = open(src,\"r\")\n\t\tsrcDict = json.load(srcFile)\n\t\tsrcFile.close()\n\t\ttry:\n\t\t\tsrcDict[\"motion\"] += 1\n\t\texcept KeyError:\n\t\t\tsrcDict[\"motion\"] = 0\n\t\tdumpDict(srcDict)\n\t\tlcd.display_string(lcdStr1,1)\n\t\tlcd.display_string(lcdWarn,2)\n\t\tquit() \n\telif sys.argv[1] == \"0\":\n\t\tcreateSrcDict()\n\t\tquit()\n\telif sys.argv[1] == \"StandBy\":\n\t\tsrcFile = open(src,\"r\")\n\t\tsrcDict = json.load(srcFile)\n\t\tsrcFile.close()\n\t\tlcd.display_string(lcdStr1,1)\n\t\ttry:\n\t\t\tlcd.display_string(lcdStr2+str(srcDict[\"motion\"]),2)\n\t\texcept KeyError:\n\t\t\tlcd.display_string(lcdStr2+\"0\",2)\n\t\t\n\t\tquit()\n\telif sys.argv[1] == \"dummyR\":\n\t\tsrcFile = 
open(\"dummy.txt\",\"r\")\n\t\tfor line in srcFile.readlines():\n\t\t\tprint(line)\n\t\tsrcFile.close()\n\telif sys.argv[1] == \"dummyW\":\n\t\tsrcFile = open(\"dummy.txt\", \"w\")\n\t\tsrcFile.write(\"Esto es una reescritura\")\n\t\tsrcFile.close()\n\telse:\n\t\tpass\n\ndef dumpDict(srcDict):\n\tsrcFile = open(src,\"w\")\n\tjson.dump(srcDict, srcFile)\n\tsrcFile.close()\t\n\t\t\ndef createSrcDict():\n\tprint(\"Creando archivo...\")\n\tsrcDict = {\"motion\": 0}\n\tprint(\"Escribiendo\")\n\tdumpDict(srcDict)\n\tprint(\"Ok\")\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"606938792","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nDOCUMENTATION='''\nmodule: mysql_request\nauthor: David GILLARD\ndescription: Module to do simple SQL request against MySQL database\n\noptions:\n db_name:\n description: DB name\n required: yes\n request:\n description: SQL request to execute\n required: yes\n\n'''\n\nEXAMPLES='''\n- name: \"Simple SELECT\"\n mysql_request:\n db_name: \"test\"\n request: \"SELECT 1\"\n'''\n\nRETURN = '''\nresults:\n description: return all results\n'''\n\nfrom ansible.module_utils.basic import AnsibleModule\n###import MySQLdb\n\ndef main():\n module = AnsibleModule(\n argument_spec=dict(\n db_name = dict(required=True, type='str'),\n request = dict(required=True, type='str'),\n login_unix_socket = dict(default=None),\n login_port = dict(default=3306, type='int'),\n config_file = dict(default=\"~/.my.cnf\", type='path')\n )\n )\n\n # Retrieving options value\n db_name = module.params.get('db_name')\n request = module.params.get('request')\n login_unix_socket = module.params.get('login_unix_socket')\n login_port = module.params.get('login_port')\n config_file = module.params.get('config_file')\n\n # Connect to your database\n db = MySQLdb.connect(db=db_name, login_unix_socket=login_unix_socket, config_file=\"~/.my.cnf\")\n # Get a cursor, execute your request then close connection\n cur = db.cursor()\n cur.execute(request)\n results = cur.fetchall()\n db.close()\n # Return result\n module.exit_json(\n changed=False,\n ansible_facts=dict(\n mysql_results=results\n )\n )\n\nif __name__ == \"__main__\":\n main()","sub_path":"library/mysql_request.py","file_name":"mysql_request.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"93181918","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport socket\nimport os\n\nfrom ..util_core.v2ray import restart, V2ray\nfrom ..util_core.writer import GroupWriter\nfrom ..util_core.group import Mtproto, SS\nfrom ..util_core.selector import GroupSelector\nfrom ..util_core.utils import get_ip, gen_cert, readchar, is_ipv4\n\nclass TLSModifier:\n def __init__(self, group_tag, group_index, domain='', alpn=None, xtls=False):\n self.domain = domain\n self.alpn = alpn\n self.xtls = xtls\n self.writer = GroupWriter(group_tag, group_index)\n \n @restart(True)\n def turn_on(self, need_restart=True):\n print(\"\")\n print(_(\"1. Let's Encrypt CERTIFICADO(AUTO GENERADO, PREPARE EL DOMINIO)\"))\n print(_(\"2. 
CERTIFICADO MODIFICADO (PREPARE CERTIFICADO PERSONALIZADO)\"))\n print(\"\")\n choice = readchar(_(\"SELECCIONE: \"))\n input_domain = self.domain\n if choice == \"1\":\n if not input_domain:\n local_ip = get_ip()\n input_domain = input(_(\"INGRESE EL DOMINIO DE SU VPS: \"))\n try:\n if is_ipv4(local_ip):\n socket.gethostbyname(input_domain)\n else:\n socket.getaddrinfo(input_domain, None, socket.AF_INET6)[0][4][0]\n except Exception:\n print(_(\"EL DOMINIO NO ESTA ASOCIADO!!!\"))\n print(\"\")\n return\n\n print(\"\")\n print(_(\"GENERANDO AUTOMATICAMENTE CERTIFICADO SSL, ESPERE....\"))\n V2ray.stop()\n gen_cert(input_domain)\n crt_file = \"/root/.acme.sh/\" + input_domain +\"_ecc\"+ \"/fullchain.cer\"\n key_file = \"/root/.acme.sh/\" + input_domain +\"_ecc\"+ \"/\"+ input_domain +\".key\"\n\n self.writer.write_tls(True, crt_file=crt_file, key_file=key_file, domain=input_domain, alpn=self.alpn, xtls=self.xtls)\n\n elif choice == \"2\":\n crt_file = input(_(\"please input certificate cert file path: \"))\n key_file = input(_(\"please input certificate key file path: \"))\n if not os.path.exists(crt_file) or not os.path.exists(key_file):\n print(_(\"certificate cert or key not exist!\"))\n return\n if not input_domain:\n input_domain = input(_(\"please input the certificate cert file domain: \"))\n if not input_domain:\n print(_(\"DOMINIO INVALIDO!!\"))\n return\n self.writer.write_tls(True, crt_file=crt_file, key_file=key_file, domain=input_domain, alpn=self.alpn, xtls=self.xtls)\n else:\n print(_(\"ERROR!\"))\n return\n return need_restart\n\n @restart()\n def turn_off(self):\n self.writer.write_tls(False)\n return True\n\ndef modify():\n gs = GroupSelector(_('modify tls'))\n group = gs.group\n\n if group == None:\n pass\n else:\n if type(group.node_list[0]) == Mtproto or type(group.node_list[0]) == SS:\n print(_(\"MTProto/Shadowsocks NO SOPORTAN HTTPS!!!\"))\n print(\"\")\n return\n tm = TLSModifier(group.tag, group.index)\n tls_status = 'open' if group.tls == 'tls' else 'close'\n print(\"{}: {}\\n\".format(_(\"ESTADO DE TLS\"), tls_status))\n print(\"\")\n print(_(\"1. ABRIR TLS\"))\n print(_(\"2. 
CERRAR TLS\"))\n choice = readchar(_(\"SELECCIONE: \"))\n if not choice:\n return\n if not choice in (\"1\", \"2\"):\n print(_(\"ERROR, INGRESE DE NUEVO:\"))\n return\n\n if choice == '1':\n tm.turn_on()\n elif choice == '2':\n tm.turn_off()","sub_path":"v2ray_util/config_modify/tls.py","file_name":"tls.py","file_ext":"py","file_size_in_byte":3703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"395403706","text":"import datetime\nimport collections\nimport boto3\n\ncloudtrail = boto3.client('cloudtrail')\n\n\ndef lambda_handler(event, context):\n account_id = event['account_id']\n time_discovered = event['time_discovered']\n username = event['username']\n deleted_key = event['deleted_key']\n exposed_location = event['exposed_location']\n endtime = datetime.datetime.now()\n interval = datetime.timedelta(hours=24)\n starttime = endtime - interval\n print('Retrieving events...')\n events = get_events(username, starttime, endtime)\n print('Summarizing events...')\n event_names, resource_names, resource_types = get_events_summaries(events)\n return {\n \"account_id\": account_id,\n \"time_discovered\": time_discovered,\n \"username\": username,\n \"deleted_key\": deleted_key,\n \"exposed_location\": exposed_location,\n \"event_names\": event_names,\n \"resource_names\": resource_names,\n \"resource_types\": resource_types\n }\n\n\ndef get_events(username, starttime, endtime):\n try:\n response = cloudtrail.lookup_events(\n LookupAttributes=[\n {\n 'AttributeKey': 'Username',\n 'AttributeValue': username\n },\n ],\n StartTime=starttime,\n EndTime=endtime,\n MaxResults=50\n )\n except Exception as e:\n print(e)\n print('Unable to retrieve CloudTrail events for user \"{}\"'.format(username))\n raise(e)\n return response\n\n\ndef get_events_summaries(events):\n event_name_counter = collections.Counter()\n resource_name_counter = collections.Counter()\n resource_type_counter = collections.Counter()\n for event in events['Events']:\n resources = event.get(\"Resources\")\n event_name_counter.update([event.get('EventName')])\n if resources is not None:\n resource_name_counter.update([resource.get(\"ResourceName\") for resource in resources])\n resource_type_counter.update([resource.get(\"ResourceType\") for resource in resources])\n return event_name_counter.most_common(10), resource_name_counter.most_common(10), resource_type_counter.most_common(10)\n","sub_path":"ExposedAccessKeys/lambda_functions/lookup_cloudtrail_events.py","file_name":"lookup_cloudtrail_events.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"59422459","text":"# 16. 
Write a Python program to get the difference between a\r\n# given number and 17, if the number is greater than 17 return double the absolute difference\r\n\r\nn = int(input('enter any integer: '))\r\n\r\n\r\ndef check(num):\r\n if num <= 17:\r\n return (17- num)\r\n else:\r\n return (num - 17) * 2\r\n\r\n\r\nprint(check(n))\r\n","sub_path":"problem16.py","file_name":"problem16.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"583753988","text":"import numpy as np\nimport pickle\nimport math\nimport os\n\nclass pagerank:\n\n def __init__(self,top,beta,basketsize):\n self.originDataPath = 'data/WikiData.txt'\n self.sortedDataPath = 'data/mappedtest_WikiData.txt'\n self.resultDataPath = 'data/result.txt'\n self.top=top\n self.beta=beta\n self.nodes = self.read_file()\n self.basketsize=basketsize\n self.basketnum=int(math.ceil(float(len(self.nodes)) / basketsize))\n print(\n self.basketnum\n )\n self.nodenum=self.sort_node()\n print('nodenum%d'%self.nodenum)\n self.sort_data()\n self.to_blockmatrix()\n self.generate_top()\n\n def read_file(self):\n originData = np.loadtxt(self.originDataPath,dtype='int')\n nodes = np.hstack((originData[:, 0],originData[:, 1]))\n nodes = np.unique(nodes)\n nodes.sort()\n return nodes\n\n def sort_node(self):\n nodenum = len(self.nodes)\n index = [i for i in range(nodenum)]\n map = dict(zip(index, self.nodes))\n maprev=dict(zip(self.nodes, index))\n pickle.dump(map, open('data/mid/mapor.txt', 'wb+'))\n pickle.dump(maprev, open('data/mid/maprev.txt', 'wb+'))\n return nodenum\n\n def sort_data(self):\n mapor = pickle.load(open('data/mid/maprev.txt', 'rb'))\n originData = np.loadtxt(self.originDataPath, dtype='int')\n dist=[]\n for i in range(originData.shape[0]):\n tmp = originData[i]\n l1=mapor.get(tmp[0])\n l2 = mapor.get(tmp[1])\n dist.append([l1,l2])\n np.savetxt(self.sortedDataPath,dist,fmt=\"%d %d\")\n\n def to_blockmatrix(self):\n sortedData=np.loadtxt(self.sortedDataPath,dtype='int')\n dist = []\n for i in range(sortedData.shape[0]):\n if (i+1)%1000 == 0:\n print(\"matrix to block finished :\")\n print(i*1.0/sortedData.shape[0])\n tmp = sortedData[i] \n if i == sortedData.shape[0]-1:\n dist.append(tmp[1])\n degree = len(dist)\n blocks = [[degree] for _ in range(self.basketnum)]\n for item in dist:\n blocks[int(item / self.basketsize)].append(item)\n for bas in range(self.basketnum):\n if len(blocks[bas]) > 1:\n np.savetxt(('data/mid/blocks_%d_%d.txt' % (tmp[0], bas)),blocks[bas])\n dist=[]\n elif sortedData[i+1,0] != sortedData[i,0]:\n dist.append(tmp[1])\n degree = len(dist)\n blocks = [[degree] for _ in range(self.basketnum)]\n for item in dist:\n blocksid=int(item / self.basketsize)\n blocks[blocksid].append(item)\n for bas in range(self.basketnum):\n if len(blocks[bas]) > 1:\n np.savetxt(('data/mid/blocks_%d_%d.txt' % (tmp[0], bas)), blocks[bas])\n dist=[]\n else:\n dist.append(tmp[1])\n\n def generate_top(self):\n for item in range(self.basketnum):\n r = [ 1.0 / (self.nodenum) for _ in range(self.basketsize)]\n np.save('data/oldr/oldr_%d.npy' % item, r)\n e = 1\n i=0\n print('origin sum:',1.0 / (self.nodenum)*self.basketsize*self.basketnum)\n # print result\n while e > 1e-6:\n print(str(i)+\" time train \"+str(e))\n e = 0\n # item \n # print('self.nodenum:%d'%self.nodenum)\n solvedeadend = 0;\n for item in range(self.basketnum): \n r_new = np.array([(1.0 - beta) / self.nodenum for _ in range(self.basketsize)])\n for src in range(self.nodenum):\n r_old = 
np.load('data/oldr/oldr_%d.npy' % (int(src/self.basketsize)))\n if not os.path.exists('data/mid/blocks_%d_%d.txt' % (src, item)):\n continue\n else:\n line = np.loadtxt('data/mid/blocks_%d_%d.txt' % (src, item))\n di = line[0]\n destList = [nodes for nodes in line[1:]]\n # print('src:',src,'destList',destList,'di',di)\n for k in destList:\n r_new[int(k % self.basketsize )] += beta * r_old[int(src % self.basketsize)] / di\n # print('r_old[int(src % self.basketsize)] / di',r_old[int(src % self.basketsize)] / di)\n np.save('data/newr/newr_%d.npy' % item, r_new) \n for src in range(self.nodenum):\n judge=0\n for item in range(self.basketnum): \n if os.path.exists('data/mid/blocks_%d_%d.txt' % (src, item)):\n judge=1\n if judge == 0:\n r_old = np.load('data/oldr/oldr_%d.npy' % (int(src/self.basketsize)))\n solvedeadend += beta * r_old[int(src % self.basketsize)]/self.nodenum\n print('solvedeadend',solvedeadend)\n for i in range(self.basketnum):\n rn = np.load('data/newr/newr_%d.npy' % i)\n rn = [i+solvedeadend for i in rn]\n np.save('data/newr/newr_%d.npy' % i, rn) \n x = []\n for i in range(self.basketnum):\n r = np.load('data/newr/newr_%d.npy' % i)\n for item in r:\n x.append(item)\n x = x[:self.nodenum]\n print('sum(x):',sum(x))\n for i in range(self.basketnum):\n rn = np.load('data/newr/newr_%d.npy' % i)\n ro = np.load('data/oldr/oldr_%d.npy' % i)\n e += np.linalg.norm((np.array(rn) - np.array(ro)), ord=1)\n np.save('data/oldr/oldr_%d.npy' % i, rn)\n print('x',x)\n print('sum',sum(x))\n temp=sorted(range(len(x)), key=lambda i: x[i], reverse=True)[:self.top]\n score=sorted(x,reverse=True)[:self.top];\n mapor = pickle.load(open('data/mid/mapor.txt', 'rb'))\n print('self.top%d'%self.top)\n re=[]\n for i in range(self.top):\n l1=mapor.get(temp[i]) # nodeid\n re.append([int(l1),score[i]])\n print('nodeid: %d score[%d]: %f'%(l1,i,score[i]))\n np.savetxt(self.resultDataPath,re, fmt=\"%d %f\")\n\n\nif __name__ == '__main__':\n top = 100\n beta = 0.85\n basketsize= 700\n p = pagerank(top, beta,basketsize)\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"193366636","text":"from bs4 import BeautifulSoup\nimport time\nfrom threading import Thread\nimport requests\nfrom sqlalchemy import Column, DateTime, String, Integer,func,create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\n\nBase = declarative_base()\n\nclass CustomBase(Base):\n __abstract__ = True\n created_at = Column(DateTime, default=func.now())\n\n# ------------------- Movies model- stores Movies info -----------------------------------\nclass Movies(CustomBase):\n id = Column(Integer, primary_key=True, autoincrement=True, nullable=False,index=True)\n name = Column(String(255), default=None)\n size = Column(String(255), default=None)\n url = Column(String(255), default=None ,primary_key=True,nullable=False ,unique=True,index=True)\n extension = Column(String(255), default=None)\n format = Column(String(255), default=None)\n owner = Column(String(255), default=None)\n __tablename__ = 'Movies'\n\n def __init__(self, name=None, size=None, url=None, extension=None, format=None, owner=None):\n self.name = name\n self.size = size\n self.url = url\n self.extension = extension\n self.format = format\n self.owner = owner\n\n def __repr__(self):\n return self.phone\n\n # def is_authenticated(self): #can be used if required\n # return True\n\n @property\n 
def serialize(self):\n \"\"\"Return object data in easily serializeable format\"\"\"\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\ndef getSession():\n session = sessionmaker()\n session.configure(bind=engine)\n session = session()\n return session\n\ndef job(i):\n try:\n url = \"http://drive.pahe.in/file/\" + str(i)\n \"\"\" Response from url \"\"\"\n resp = requests.get(url)\n print(\"#### Visiting:\", url)\n \"\"\" parse response to lxml \"\"\"\n soup = BeautifulSoup(resp.content, 'lxml')\n data = soup.find('tbody')\n each_link_data = dict()\n for tr in data.find_all('tr'):\n td = tr.find_all('td')\n local_data = {\n td[0].text: td[1].text.replace(\"\\t\", \"\").replace(\"\\n\", \"\")\n }\n each_link_data.update(local_data)\n print(\"**** Found:\", each_link_data)\n print(\"-\" * 20)\n name = each_link_data.get('File Name')\n size = each_link_data.get('File Size')\n format = each_link_data.get('File Type')\n owner = each_link_data.get('File Owner')\n extension = each_link_data.get('File Extension')\n # url = url\n movie = Movies(name,size,url,extension,format,owner)\n session = getSession()\n session.add(movie)\n session.commit()\n t.isAlive = False\n except Exception as e:\n pass\n\n\nif __name__ == \"__main__\":\n engine = create_engine('mysql://root:@localhost/pahe_db?charset=utf8', pool_size=200)\n Base.metadata.create_all(engine)\n for i in range(1, 91000):\n t = Thread(target=job, args=[i])\n t.daemon = True\n time.sleep(.1)\n t.start()\n\n\n\n","sub_path":"pahe_with_db.py","file_name":"pahe_with_db.py","file_ext":"py","file_size_in_byte":3078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"81790940","text":"#-*- coding:utf-8 -*-\n\nimport logging\n\nfrom flask.ext import script\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom app.collection.tasks import find_torrents, process_user_watchlist\nfrom app.core.decorators import with_request_context\nfrom app.core.models import User\nfrom app.extensions import sentry\n\n\nmanager = script.Manager()\n\n\n@manager.option('-u', '--user', dest='user_id', required=True,\n type=int, default=0)\n@manager.option('--process', dest='process', action='store_true')\n@manager.option('--queue', dest='queue', action='store_true')\n@manager.option('--torrents', dest='torrents', action='store_true')\n@with_request_context\ndef watchlist(*args, **kwargs):\n logger = logging.getLogger('app')\n user_id = kwargs.get('user_id')\n process = kwargs.get('process')\n torrents = kwargs.get('torrents')\n provider = 'imdb'\n\n try:\n user = User.query.filter_by(id=user_id).one()\n except NoResultFound:\n sentry.captureException()\n return\n\n prefix = 'Process watchlist queue for user \"%s\"' % user.login\n\n if process:\n logger.info('%s: start.' 
% prefix)\n process_user_watchlist.apply_async(kwargs={\n 'user_id': user.id, 'provider': provider\n }, queue='collection')\n logger.info('%s: finished' % prefix)\n\n if torrents:\n find_torrents(user_id=user_id)\n","sub_path":"app/collection/manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"4100716","text":"# -*- encoding: utf-8 -*-\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\n\nSOFTBUTTERFLY = getattr(settings, 'SOFTBUTTERFLY', None)\nMAILAUTH = SOFTBUTTERFLY.get('MAILAUTH', {})\n\nAPP_VERBOSE_NAME = MAILAUTH.get('APP_VERBOSE_NAME', _(\"Authentication and Authorization\"))\nREGISTER_PROXY_AUTH_GROUP_MODEL = MAILAUTH.get('REGISTER_PROXY_AUTH_GROUP_MODEL', True)\nENABLE_USERNAME = MAILAUTH.get('ENABLE_USERNAME', True)\n","sub_path":"softbutterfly/mailauth/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"418538730","text":"#parsing moduls\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom csv import writer\n\n#moduls for mailing\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.base import MIMEBase\nfrom email import encoders\n\n\nimport glob\nimport os\n\n\n#moduls for random string\nimport random\nimport string\n\n#random string \nrandom = ''.join([random.choice(string.ascii_letters + string.digits) for n in range(8)])\nprint(random)\n\n#loop thrue pages\nfor x in range(1,10):\n response = requests.get(\"http://www.bolha.com/iskanje?q=mestno+kolo&sort=1&page=%s\" %x)\n\n soup = BeautifulSoup(response.text, 'html.parser')\n\n posts = soup.find_all(class_='ad')\n\n #random string filename\n with open(\"mestno_kolo_%s.txt\"%random, 'a', encoding=\"utf-8\") as csv_file:\n csv_writer = writer(csv_file)\n \n \n #match content and write to .txt\n for post in posts:\n naslov = post.find('a')['title'].replace(\"č\",\"c\").replace(\"ž\",\"z\").replace(\"š\",\"s\").replace(\"Ž\",\"Z\")\n link = \"www.bolha.com\" + post.find('a')['href'] \n cena = post.find(class_=\"price\").get_text().replace(\"€\",\"e\")\n csv_writer.writerow([naslov,\"----\",cena,\"----\",link])\n\n\n#get name of last generated file where script is located\nlist_of_files = glob.glob('*txt') \nlatest_file = max(list_of_files, key=os.path.getctime)\nprint (latest_file)\n\n #mail credentials\n\nemail_user = '' #mail username\nemail_password = '' #mail password\nemail_send = '' #mail reciever\n\nsubject = 'Bolha_scrape_kolesa'\n\nmsg = MIMEMultipart()\nmsg['From'] = email_user\nmsg['To'] = email_send\nmsg['Subject'] = subject\n\nbody = 'Avtomatsko generiran email '\nmsg.attach(MIMEText(body,'plain'))\n\nfilename='%s'%latest_file\nattachment =open(filename,'rb')\n\npart = MIMEBase('application','octet-stream')\npart.set_payload((attachment).read())\nencoders.encode_base64(part)\npart.add_header('Content-Disposition',\"attachment; filename= \"+filename)\n\nmsg.attach(part)\ntext = msg.as_string()\nserver = smtplib.SMTP('smtp.gmail.com',587)\nserver.starttls()\nserver.login(email_user,email_password)\n\n\nserver.sendmail(email_user,email_send,text)\nserver.quit()\n \n \t\n","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} 
+{"seq_id":"396148204","text":"from torch import nn\n\n\nclass HuberLoss(nn.Module):\n \"\"\"\n The choice of delta is critical because it determines what you’re willing to consider as an outlier.\n Residuals larger than delta are minimized with L1 (which is less sensitive to large outliers),\n while residuals smaller than delta are minimized “appropriately” with L2.\n \"\"\"\n def __init__(self, delta=0.1):\n super(HuberLoss, self).__init__()\n self.mse = nn.MSE()\n self.mae = nn.MAE()\n self.delta = delta\n\n def forward(self, y_pred, y_target, ):\n e = self.mse(y_pred, y_target)\n if e > self.delta:\n e = self.delta * self.mae(y_pred, y_target) + 1/2 * (self.delta ** 2)\n return e/y_target.shape[0]\n","sub_path":"models/loss/huber.py","file_name":"huber.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"516546314","text":"\nimport logging\nfrom collections import OrderedDict\n\nfrom .. import requests\n\nimport constants\n\n\ndef get_trigger_id_list(conn, plan_id, trigger_name, trigger_desc=None):\n \"\"\"\n Get all the trigger ids\n :param conn:\n :param plan_id:\n :param trigger_name:\n :param trigger_desc:\n :return:\n \"\"\"\n params = {\n \"buildKey\": plan_id,\n }\n\n trigger_name = trigger_name.lower()\n\n html_root = requests.get_ui_return_html(\n conn,\n conn.baseurl + '/chain/admin/config/editChainTriggers.action',\n params)\n\n trigger_id_list = []\n\n editor_html = html_root.find('.//div[@id=\"panel-editor-list\"]')\n if editor_html is None:\n logging.error(constants.INCORRECT_PLAN_KEY_ERROR_MSG % plan_id)\n return None\n\n trigger_list = editor_html.findall('.//li[@class=\"item\"]')\n for index, trigger_html in enumerate(trigger_list):\n tri_name = trigger_html.find('.//*[@class=\"item-title\"]').text.lower()\n tri_desc_html = trigger_html.find('.//*[@class=\"item-description\"]')\n\n if tri_desc_html is not None:\n tri_desc = tri_desc_html.text.lower()\n else:\n tri_desc = None\n\n if tri_name == trigger_name \\\n and (trigger_desc is None or tri_desc == trigger_desc.lower()):\n trigger_id_list.append(index+1)\n\n return trigger_id_list\n\n\ndef update_trigger_to_commit(conn, plan_id, trigger_name, enable_repos_list, trigger_desc=None,\n raise_multiple_triggers=False):\n \"\"\"\n This method is for bamboo v5.7.2\n Batch update the trigger\n :param conn:\n :param plan_id:\n :param enable_repos_list:\n A name list of which repository can be enabled\n :param trigger_name:\n Name of the trigger, ignore case,\n 'repository triggered build', 'stash repository triggered' for example\n :param trigger_desc:\n The trigger description\n :param raise_multiple_triggers:\n True: if the trigger specified is more than one, raise exception for safe\n :return:\n \"\"\"\n trigger_id_list = get_trigger_id_list(conn, plan_id, trigger_name, trigger_desc)\n if trigger_id_list is None:\n return\n\n if raise_multiple_triggers and len(trigger_id_list) > 0:\n raise Exception('Multiple triggers!')\n\n params = {\n \"buildKey\": plan_id,\n \"decorator\": \"nothing\",\n \"confirm\": \"true\",\n }\n\n res_list = []\n for trigger_id in trigger_id_list:\n params.update({\n 'triggerId': trigger_id\n })\n\n trigger_params = OrderedDict()\n trigger_params['buildKey'] = plan_id\n trigger_params['triggerId'] = trigger_id\n trigger_params[\"confirm\"] = \"true\"\n trigger_params['decorator'] = 'nothing'\n trigger_params['submit'] = 'Yes'\n\n form = requests.get_ui_return_html(\n conn,\n conn.baseurl + 
'/chain/admin/config/editChainTrigger.action',\n params)\n\n form_inputs = form.findall('.//input')\n\n options = form.findall('.//select[@name=\"selectedBuildStrategy\"]/option')\n for option in options:\n is_selected = option.attrib.get('selected')\n value = option.attrib.get('value')\n if is_selected == 'selected':\n del option.attrib['selected']\n\n if value == 'trigger':\n option.attrib['selected'] = 'selected'\n\n trigger_params['selectedBuildStrategy'] = 'trigger'\n\n form.cssselect('fieldset[id=\"fieldArea_repositoryTriggers\"]')[0].attrib['style']='display:block'\n\n repos_value_list = []\n\n for form_input in form_inputs:\n name = form_input.attrib.get('name')\n i_type = form_input.attrib.get('type')\n\n if i_type == 'checkbox' and name == 'repositoryTrigger':\n input_id = form_input.attrib.get('id')\n tmp_repos_name = form.find('.//label[@for=\"'+input_id+'\"]').text.strip().replace('\\n', '')\n if tmp_repos_name in enable_repos_list:\n value = form_input.attrib.get('value')\n repos_value_list.append(value)\n form_input.attrib['checked'] = 'checked'\n else:\n if form_input.attrib.get('checked') == 'checked':\n del form_input.attrib['checked']\n\n elif i_type == 'checkbox' and name != 'repositoryTrigger':\n is_checked = form_input.attrib.get('checked')\n if is_checked and is_checked == \"checked\":\n value = \"true\"\n else:\n value = \"false\"\n else:\n value = form_input.attrib.get('value')\n\n trigger_params[name] = value\n\n trigger_params['repositoryTrigger'] = repos_value_list\n\n res = requests.post_ui_return_html(\n conn,\n conn.baseurl + '/chain/admin/config/updateChainTrigger.action',\n trigger_params\n )\n\n res_list.append(res)\n\n return res_list\n\n\n\n\n\n\n\n\n\n","sub_path":"lib/bamboo/triggers.py","file_name":"triggers.py","file_ext":"py","file_size_in_byte":5096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"645817173","text":"\nfrom django.db.models import Q\nfrom time import sleep\n\nfrom .newentityinspector import NewEntityInspector\nfrom .diffinspector import DiffInspector\nfrom .diffdigest import DiffDigest\nfrom .models import Tag\n\nfrom store.models import Batch\n\nclass BatchInspector(object):\n \"\"\"\n Extracts tags from diffs and revision ids,\n when the batch contains actions that do not\n expose enough information in their summaries.\n \"\"\"\n\n tags_for_diff_inspection = {\n 'wbeditentity-update',\n 'wbeditentity-update-languages',\n 'wbeditentity-update-languages-short',\n 'wbeditentity-update-languages-and-other',\n 'wbeditentity-update-languages-and-other-short',\n 'wbcreateclaim', # for qualifiers\n # 'wbsetclaim-update', # could add qualifiers, but currently only used by OR to add refs\n }\n\n max_edits_fetch = 50 # only look for that many edits with inspectable actions\n max_diff_inspections = 10 # one request for each\n max_new_items_inspections = 25 # one request for all, cheaper\n\n requests_delay = 0.5\n\n def __init__(self, new_entity_inspector=None, diff_inspector=None, endpoint='https://www.wikidata.org/w/api.php'):\n self.new_entity_inspector = new_entity_inspector or NewEntityInspector(endpoint=endpoint)\n self.diff_inspector = diff_inspector or DiffInspector(endpoint=endpoint)\n\n def inspect(self, batch):\n \"\"\"\n Inspect the given batch if needed, and add the corresponding\n tags to the batch.\n \"\"\"\n digest = DiffDigest()\n tags = set(batch.tag_ids)\n\n if tags & self.tags_for_diff_inspection:\n # We need to inspect some diffs!\n edits = 
batch.edits.filter(oldrevid__gt=0)[:self.max_edits_fetch]\n nb_edits_inspected = 0\n for edit in edits:\n if set(tag.id for tag in Tag.extract(edit)) & self.tags_for_diff_inspection:\n digest += self.diff_inspector.inspect(edit.oldrevid, edit.newrevid)\n nb_edits_inspected += 1\n sleep(self.requests_delay)\n if nb_edits_inspected >= self.max_diff_inspections:\n break\n\n if batch.nb_new_pages:\n # We need to inspect some new items!\n revids = batch.edits.filter(oldrevid=0)[:self.max_new_items_inspections].values_list('newrevid', flat=True)\n digest += self.new_entity_inspector.inspect(revids)\n sleep(self.requests_delay)\n\n return digest\n\n def add_missing_tags(self, batch):\n \"\"\"\n Like `inspect`, but adds any missing tags to the batch instead of returning a digest.\n \"\"\"\n diffdigest = self.inspect(batch)\n tags = ([Tag.for_property(pid) for pid in diffdigest.statements | diffdigest.qualifiers ] +\n [Tag.for_language(lang) for lang in diffdigest.labels | diffdigest.descriptions | diffdigest.aliases | diffdigest.sitelinks ])\n tags_not_there_yet = [tag for tag in tags if tag.id not in batch.tag_ids]\n Tag.add_tags_to_batches({batch.id: [tag.id for tag in tags_not_there_yet]})\n\n def inspect_batches_since(self, since_time):\n \"\"\"\n Inspects all batches that need inspection, only considering\n batches modified since the given time.\n \"\"\"\n queryset = Batch.objects.filter(Q(tags__id__in = self.tags_for_diff_inspection) | Q(nb_new_pages__gt = 0), last_modified__gt=since_time, archived=False).order_by('ended').distinct()\n for batch in queryset:\n self.add_missing_tags(batch)\n\n","sub_path":"tagging/batchinspector.py","file_name":"batchinspector.py","file_ext":"py","file_size_in_byte":3571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"630490322","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom unittest import TestCase\nfrom frame.platform.wtf.fields import MultipleField\n\nfrom wtforms import Form\nfrom wtforms import fields\nfrom wtforms import validators\nfrom werkzeug.datastructures import MultiDict\n\nclass FieldsTestCase(TestCase):\n\n def test_multiple_field(self):\n\n class TestForm(Form):\n m=MultipleField(fields.StringField(validators=[\n validators.Required(), validators.Email()]))\n\n form_data = MultiDict([('m', '1'), ('m', '2')])\n form = TestForm(form_data)\n self.assertFalse(form.validate())\n self.assertEqual(form.errors['m'][0][0], 'Invalid email address.')\n\n form_data = MultiDict([('m', 'abc@guokr.com'), ('m', 'wtf@guokr.com')])\n form = TestForm(form_data)\n self.assertTrue(form.validate())\n self.assertEqual(form.m.data, ['abc@guokr.com', 'wtf@guokr.com'])\n\n form_data = MultiDict([])\n form = TestForm(form_data)\n self.assertTrue(form.validate())\n self.assertEqual(form.m.data, [])\n\n class Test2Form(Form):\n m=MultipleField(fields.StringField(validators=[\n validators.Required(), validators.Email()]),\n validators=[validators.Required()])\n\n form_data = MultiDict([])\n form = Test2Form(form_data)\n self.assertFalse(form.validate())\n self.assertEqual(form.errors['m'][0], 'This field is required.')\n","sub_path":"frame/platform_src/tests/test_wtf_fields.py","file_name":"test_wtf_fields.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"266577073","text":"#https://www.analyticsvidhya.com/blog/2015/10/beginner-guide-web-scraping-beautiful-soup-python/\n\nimport 
urllib\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\nwiki = 'https://en.wikipedia.org/wiki/List_of_state_and_union_territory_capitals_in_India'\n\npage = urllib.request.urlopen(wiki)\n\nsoup = BeautifulSoup(page, 'lxml')\n\nlinks = soup.find_all('a')\ntables = soup.find_all('table')\ncapitals_table = soup.find('table', class_='wikitable sortable plainrowheaders')\n\ncolumns = [[] for x in range(7)]\nheaders = []\n \nfor row in capitals_table.findAll('tr'):\n cells = row.findAll(['td', 'th'])\n for idx, cell in enumerate(cells):\n columns[idx].append(cell.find(text=True))\n\nfor column in columns:\n headers.append(column.pop(0))\n \nframe = pd.DataFrame(columns).transpose()\nframe.columns = headers\n\n \n\n \n\n","sub_path":"IntroWithSoup.py","file_name":"IntroWithSoup.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"314526791","text":"import os\r\nimport nltk\r\nimport nltk.tokenize\r\nfrom nltk.parse import stanford\r\nimport ngrammodeler as NG\r\nimport pickle\r\nimport plotFunctions as PF\r\n\r\nos.environ['STANFORD_PARSER'] = '/root/src/ls11761/ls-project/stanford/stanford-parser-full/jars'\r\nos.environ['STANFORD_MODELS'] = '/root/src/ls11761/ls-project/stanford/stanford-parser-full/jars'\r\n\r\nparser = stanford.StanfordParser(model_path=\"/root/src/ls11761/ls-project/stanford/englishPCFG.ser.gz\")\r\n\r\ndef saveObj(obj, name):\r\n with open(name + '.pkl', 'wb') as f:\r\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\r\n\r\ndef loadObj(name):\r\n with open(name + '.pkl', 'rb') as f:\r\n return pickle.load(f)\r\n\r\n\r\ndef posParseArticle(article):\r\n taggedArticle = []\r\n for sentence in article:\r\n sentence = sentence.lower()\r\n text = nltk.word_tokenize(sentence)\r\n posTaggedSentence = nltk.pos_tag(text)\r\n posTaggedSentence = [tag[1] for tag in posTaggedSentence]\r\n taggedArticle.append(posTaggedSentence)\r\n return taggedArticle\r\n\r\ndef importArticles(corpusFileName):\r\n articles = []\r\n path = os.getcwd()\r\n with open(path + '/' + corpusFileName, \"r\") as f:\r\n lines = f.readlines()\r\n article = []\r\n for line in lines:\r\n line = line.rstrip()\r\n if line == \"~~~~~\":\r\n if article:\r\n articles.append(article)\r\n article = []\r\n else:\r\n # Removes the start stop tags for the sentence\r\n line = line[4:]\r\n line = line[:-4]\r\n line = line.rstrip()\r\n article.append(line)\r\n articles.append(article)\r\n return articles\r\n\r\ndef importDataSet(corpusFileName):\r\n corpus = []\r\n path = os.getcwd()\r\n with open(path + '/' + corpusFileName, \"r\") as f:\r\n lines = f.readlines()\r\n for line in lines:\r\n line = line.rstrip()\r\n line = line[4:]\r\n line = line[:-4]\r\n line = line.rstrip()\r\n corpus.append(line)\r\n return corpus\r\n\r\ndef posParseLines(corpus, name):\r\n taggedCorpus = []\r\n for sentence in corpus:\r\n sentence = sentence.lower()\r\n text = nltk.word_tokenize(sentence)\r\n posTaggedSentence = nltk.pos_tag(text)\r\n posTaggedSentence = [tag[1] for tag in posTaggedSentence]\r\n taggedCorpus.append(posTaggedSentence)\r\n saveObj(taggedCorpus, name)\r\n return taggedCorpus\r\n\r\n\r\n\r\n\r\ndef getFakeGood(labelsFileName):\r\n path = os.getcwd()\r\n with open(path + '/' + labelsFileName, \"r\") as f:\r\n lines = f.readlines()\r\n labels = []\r\n for line in lines:\r\n line = line.rstrip()\r\n labels.append(int(line))\r\n return labels\r\n\r\ndef posParseArticles(articles, name):\r\n parsedArticles = []\r\n for article in 
articles:\r\n posParsedArticle = posParseArticle(article)\r\n parsedArticles.append(posParsedArticle)\r\n saveObj(parsedArticles, name)\r\n return parsedArticles\r\n\r\ndef computeLogLikelihood(article):\r\n pass\r\n\r\n\r\ndef main():\r\n goodArticles = []\r\n badArticles = []\r\n dataSet = importDataSet('LM-train-100MW.txt')\r\n parsedCorpus = posParseLines(dataSet, 'corpusPosTagged')\r\n #parsedCorpus = loadObj('corpusPosTagged')\r\n articles = importArticles('trainingSet.dat')\r\n labels = getFakeGood('trainingSetLabels.dat')\r\n\r\n # fg = open('goodArticles.txt', 'w')\r\n # fb = open('badArticles.txt', 'w')\r\n # i = 0\r\n # for label in labels:\r\n # if label == 1:\r\n # goodArticles.append(articles[i])\r\n # articleScores = parser.raw_parse_sents_PCFG(articles[i])\r\n # sum = 0\r\n # for a in articleScores:\r\n # a = float(a)\r\n # sum = sum + a\r\n # averageScore = sum/len(articleScores)\r\n # fg.write(\"%s, %s, %f\\n\" % (articles[i], articleScores, averageScore))\r\n # if label == 0:\r\n # badArticles.append(articles[i])\r\n # articleScores = parser.raw_parse_sents_PCFG(articles[i])\r\n # sum = 0\r\n # for a in articleScores:\r\n # a = float(a)\r\n # sum = sum + a\r\n # averageScore = sum / len(articleScores)\r\n # fb.write(\"%s, %s, %f\\n\" % (articles[i], articleScores, averageScore))\r\n # i = i + 1\r\n # fg.close()\r\n # fb.close()\r\n # uncomment the next if you want to pos parse the articles again, otherwise it just loads the last parse\r\n #parsedGoodArticles = posParseArticles(goodArticles, 'posgoodarticles')\r\n #parsedBadArticles = posParseArticles(badArticles, 'posbadarticles')\r\n # parsedGoodArticles = loadObj('posgoodarticles')\r\n # parsedBadArticles = loadObj('posbadarticles')\r\n # trigramModeler = NG.NgramModeler(parsedGoodArticles)\r\n # trigramModeler2 = NG.NgramModeler(parsedBadArticles)\r\n # print(set([a[0] for a in trigramModeler.getTopNgrams(20)])-set([a[0] for a in trigramModeler2.getTopNgrams(20)]))\r\n # print(set([a[0] for a in trigramModeler2.getTopNgrams(20)])-set([a[0] for a in trigramModeler.getTopNgrams(20)]))\r\n # y1 = []\r\n # for article in parsedGoodArticles:\r\n # llArticle = trigramModeler.computeAverageArticleLogLikelihood(article)\r\n # y1.append(llArticle)\r\n # print(llArticle)\r\n # print(len(y1))\r\n # PF.plotLL(y1,y1)\r\n #\r\n # parsedBadArticles = loadObj('posbadarticles')\r\n # print(\"Bad Articles\")\r\n # print\r\n # for article in parsedBadArticles:\r\n # #print(article)\r\n # llArticle = trigramModeler.computeAverageArticleLogLikelihood(article)\r\n # print(llArticle)\r\n\r\nif __name__ == \"__main__\": main()\r\n","sub_path":"posngrams.py","file_name":"posngrams.py","file_ext":"py","file_size_in_byte":5600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"437522816","text":"import pandas as pd\n\n\ndef main():\n pages = pd.read_csv('links.csv')\n all_pages = pages.drop_duplicates(subset=['pages','text'], keep='last')\n page_links = all_pages\n page_links.to_csv('new_links.csv', index=False, header=True)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"clean_icd10_link.py","file_name":"clean_icd10_link.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"635889555","text":"import pandas as pd\nimport numpy as np\nimport pyqt_fit.nonparam_regression as smooth\nfrom pyqt_fit import npr_methods\nimport matplotlib.pyplot as plt\nimport Path\n\npath = 
Path.GetHomePath()\n\nTime = 2\n\nDataName = \"SimulationResults/Comparison_Blocks/Comparison_SW_SW.csv\"\nDataName0 = \"SimulationResults/Comparison_Blocks/Comparison_SW_SW_Partition_0.csv\"\nDataName1 = \"SimulationResults/Comparison_Blocks/Comparison_SW_SW_Partition_1.csv\"\n\nSaveStarter = \"SimulationResults/FOI_Pics/ComparisonPics/BlockModels/SW_SW_Incidence_\"\n\na = pd.read_csv(path + DataName)\na0 = pd.read_csv(path + DataName0)\na1 = pd.read_csv(path + DataName1)\n\na = a.loc[a.EventTime < Time, ]\na0 = a0.loc[a0.EventTime < Time, ]\na1 = a1.loc[a1.EventTime < Time, ]\n\nParts = list(set(a.Partition.values))\nThresholds = list(set(a.Threshold.values))\nThresholds.sort()\n\ntime = float(10)**(-1)\n\nminI = a.I.values.min()\nmaxI = a.I.values.max()\nxs = np.arange(minI,maxI,1)\n\nj = 0\ncol = 4\nrow = 3\nxlim = (0,2000)\nfor part in Parts:\n b = a.loc[a.Partition == part,]\n b0 = a0.loc[a0.Partition == part,]\n b1 = a1.loc[a1.Partition == part,]\n\n fig,ax = plt.subplots(row,col,sharex = 'col',sharey = 'row',figsize = [12,8])\n for i in range(len(Thresholds)):\n c = b.loc[b.Threshold==Thresholds[i],]\n c0 = b0.loc[b0.Threshold==Thresholds[i],]\n c1 = b1.loc[b1.Threshold==Thresholds[i],]\n\n if len(c) > 10:\n minI = c.I.values.min()\n maxI = c.I.values.max()\n xs = np.arange(minI,maxI,1)\n k = smooth.NonParamRegression(c.I.values,c.Inc.values,method = npr_methods.LocalPolynomialKernel(q=1),bandwidth = 50)\n k.fit()\n k0 = smooth.NonParamRegression(c0.I.values,c0.Inc.values,method = npr_methods.LocalPolynomialKernel(q=1),bandwidth = 50)\n k0.fit()\n k1 = smooth.NonParamRegression(c1.I.values,c1.Inc.values,method = npr_methods.LocalPolynomialKernel(q=1),bandwidth = 50)\n k1.fit()\n # ax[i/col,i%col].plot(c.I.values,c.Inc.values,'k.',label = 'Whole Network')\n # ax[i/col,i%col].plot(c0.I.values,c0.Inc.values,'r.',label = 'Block 0')\n # ax[i/col,i%col].plot(c1.I.values,c1.Inc.values,'b.',label = 'Block 1')\n ax[i/col,i%col].plot(xs,k(xs),'k-',linewidth = 2,label = 'Whole Network')\n ax[i/col,i%col].plot(xs,k0(xs),'r-',linewidth = 2,label = 'Block 0')\n ax[i/col,i%col].plot(xs,k1(xs),'b-',linewidth = 2,label = 'Block 1')\n ylim = ax[0,0].get_ylim()\n ax[i/col,i%col].set_ylim(ylim)\n ax[i/col,i%col].set_xlim(xlim)\n ax[i/col,i%col].set_title(\"Threshold: \" + str(np.round(Thresholds[i],4)))\n else:\n ax[i/col,i%col].plot(c.I.values,c.Inc.values,'.')\n #ax[i/5,i%5].set_xlim([0,100])\n ylim = ax[0,0].get_ylim()\n ax[i/col,i%col].set_ylim(ylim)\n ax[i/col,i%col].set_xlim(xlim)\n ax[i/col,i%col].set_title(\"Threshold: \" + str(np.round(Thresholds[i],4)))\n\n fig.suptitle(\"SW SW Block Model, Partition: \" + str(part) + \", Time Round: \" + str(time))\n fig.text(0.5,0.04,\"I\",ha = \"center\")\n fig.text(0.04,0.5,\"Incidence\",va = 'center',rotation = 'vertical')\n plt.savefig(path + SaveStarter + part + \"_Reg.png\")\n plt.close()\n del fig\n del ax\n","sub_path":"PicCode/MakePicCode/MakeSWSWPics.py","file_name":"MakeSWSWPics.py","file_ext":"py","file_size_in_byte":3314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"602030964","text":"\"\"\"\nMiCADO Submitter Engine Submitter Config\n----------------------------------------\nA module allowing the configuration of the whole submitter\n\"\"\"\nimport logging\nfrom os import path\n\nimport ruamel.yaml as yaml\n\nbasepath = path.dirname(__file__)\nCONFIG_FILE = \"{}/system/key_config.yml\".format(basepath)\n\nlogger = logging.getLogger(\"submitter.\" + __name__)\n\n\nclass 
SubmitterConfig:\n \"\"\"\n This is the SubmitterConfig,\n in charge of the configuration of the whole submitter.\n It has ``__init__()``, ``get_list_adaptors()``,\n ``_reading_config()``, ``_find_get_input()``,\n ``get_SubmitterConfig()``, ``get_dict()`` and ``get_node_from_type()``.\n\n Optional testing parameter can be passed to __init__\n to define which key_config files to take for test purposes.\n\n \n \"\"\"\n\n def __init__(self, testing=None):\n logger.debug(\"initialisation of SubmitterConfig class\")\n self.config_path = testing or CONFIG_FILE\n config = self._reading_config()\n \n self.main_config = config[\"main_config\"]\n self.step_config = config[\"step\"]\n self.logging_config = config[\"logging\"]\n self.adaptor_config = config[\"adaptor_config\"]\n\n def get_list_adaptors(self):\n \"\"\"return list of adaptors to use\"\"\"\n logger.debug(\"get the list of adaptors\")\n adaptor_list = []\n for key, value in self._reading_config()[\"adaptor_config\"].items():\n adaptor_list.append(key)\n\n logger.debug(\"adaptors: {}\".format(adaptor_list))\n return adaptor_list\n\n def _reading_config(self):\n \"\"\"reading the config file and creating a dictionary related to it\"\"\"\n logger.debug(\"reading config file\")\n dic_types = dict()\n yaml.default_flow_style = False\n with open(self.config_path, \"r\") as stream:\n try:\n\n dic_types = yaml.round_trip_load(\n stream.read(), preserve_quotes=True\n )\n except OSError as exc:\n\n logger.error(\"Error while reading file, error: %s\" % exc)\n logger.debug(\"return dictionary of types from config file\")\n return dic_types\n\n def resolve_inputs(self, template):\n self._find_get_input(template.tpl, template)\n # Update nodetemplate properties\n for node in template.nodetemplates:\n node._properties = node._create_properties()\n\n def _find_get_input(self, tpl, template):\n for key, value in tpl.items():\n if key == \"get_input\":\n return value\n elif isinstance(value, dict):\n result = self._find_get_input(value, template)\n if result:\n tpl[key] = self._get_input_value(result, template)\n elif isinstance(value, list):\n for i in value:\n if not isinstance(i, dict):\n continue\n result = self._find_get_input(i, template)\n if result:\n tpl[key][i] = self._get_input_value(result, template)\n\n def _get_input_value(self, key, template):\n try:\n return template.parsed_params[key]\n except (KeyError, TypeError):\n logger.debug(f\"Input '{key}' not given, using default\")\n\n try:\n return [\n param.default for param\n in template.inputs\n if param.name == key][0]\n except IndexError:\n logger.error(f\"Input '{key}' has no default\")\n\n","sub_path":"submitter/submitter_config.py","file_name":"submitter_config.py","file_ext":"py","file_size_in_byte":3563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"429053754","text":"from nltk.corpus import wordnet as wn\n\n\ndef synonyms(word):\n word_list = []\n for i, j in enumerate(wn.synsets(word)):\n words = j.lemma_names()\n for item in words:\n if item not in word_list:\n word_list.append(item)\n return word_list\n\n\ndict = {}\n\n\ndef find_word(word, text):\n word_list = synonyms(word)\n \n # iterate through the list of synonyms\n for item in word_list:\n sublist = []\n \n # make sure not getting the same word\n if not item == word:\n # make a list and add item, page number, column number to it\n sublist.append(item)\n sublist.append(Drafts.Navigate.Navigate.get_line(item, \"sample.txt\"))\n 
sublist.append(Drafts.Navigate.Navigate.get_specific_column_number(item, \"sample.txt\"))\n \n # turn the list into tuple\n item_tuple = tuple(sublist)\n if item in text and word not in dict:\n dict[word] = [item_tuple]\n elif word in dict and item in text and item_tuple not in dict[word]:\n dict[word].append(item_tuple)\n\n\ndef word_to_concepts(text):\n \n text = text.split()\n for item in text:\n find_word(item, text)\n return dict\n\n\n# print(word_to_concepts(\"sample.txt\"))\n","sub_path":"Drafts/Navigate/synonyms.py","file_name":"synonyms.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"601090371","text":"from functools import wraps\n\nimport flask\nfrom parse import *\n\nimport urllib.parse\nfrom sqlalchemy import func, or_, and_\nfrom sqlalchemy.sql import column, table\nfrom flask import Blueprint, jsonify, request, current_app, session\nfrom flask_socketio import join_room\n\nfrom .models import db, Game, User, Email, Friendship, Avatar\nfrom .tools import randomize_page, get_wiki_page, getSummaryWikiPage, send_mail\nfrom datetime import datetime, timedelta\n\nimport jwt\nimport uuid\n\napi = Blueprint('api', __name__)\n\n\ndef token_required(f):\n @wraps(f)\n def _verify(*args, **kwargs):\n auth_headers = request.headers.get('Authorization', '').split()\n\n invalid_msg = {\n 'message': 'Invalid token. Registeration and / or authentication required',\n 'authenticated': False\n }\n expired_msg = {\n 'message': 'Expired token. Reauthentication required.',\n 'authenticated': False\n }\n\n if len(auth_headers) != 2:\n return jsonify(invalid_msg), 401\n\n try:\n token = auth_headers[1]\n\n data = jwt.decode(token, current_app.config['SECRET_KEY'], algorithms=['HS256'])\n user = User.query.filter_by(username=data['sub']).first()\n if not user:\n raise RuntimeError('User not found')\n return f(user, *args, **kwargs)\n except jwt.ExpiredSignatureError:\n return jsonify(expired_msg), 401 # 401 is Unauthorized HTTP status code\n except jwt.InvalidTokenError as e:\n print(e)\n return jsonify(invalid_msg), 401\n\n return _verify\n\n\n@api.route('/')\ndef index():\n sid = User.query.filter_by(id=1).first().sid\n if sid is not None:\n socketio().emit('NEW_FRIEND_INVITATION', 'John Doe', to=sid)\n\n return flask.render_template('index.html')\n\n@api.route('/me', methods=('POST',))\n@token_required\ndef me(current_user):\n return jsonify(current_user.to_dict())\n\n@api.route('/login', methods=('POST',))\ndef login():\n data = request.get_json()\n (user, message) = User.authenticate(**data)\n\n if message:\n code = 401\n if user is not None:\n code = 403\n # user.validation_token = uuid.uuid4()\n #\n # db.session.add(user)\n # db.session.flush()\n # db.session.commit()\n # send_mail('register', user, data={'pseudo': user.username, 'token': str(user.validation_token),\n # 'linkValider': f'[appUrl]/inscription/{user.validation_token}'})\n return jsonify({ 'message': message, 'authenticated': False }), code\n\n session['user'] = user.to_dict()\n\n token = jwt.encode({\n 'sub': user.username,\n 'iat':datetime.utcnow(),\n 'exp': datetime.utcnow() + timedelta(days=1)},\n current_app.config['SECRET_KEY'])\n\n body = user.to_dict()\n body['jwt'] = token\n\n resp = jsonify(body)\n\n resp.set_cookie('jwt_access_token', token)\n\n return resp\n\n@api.route('/register', methods=('POST',))\ndef register():\n data = request.get_json()\n\n (is_valid, error, field) = User.verify_form('register', **data)\n if not 
is_valid:\n return jsonify({ 'message': error, 'fields': [field], 'authenticated': False }), 401\n\n fields = []\n\n if User.query.filter(func.lower(User.username) == func.lower(data.get('username'))).first() is not None:\n fields.append('username')\n if User.query.filter(func.lower(User.email) == func.lower(data.get('email'))).first() is not None:\n fields.append('email')\n if len(fields) > 0:\n return jsonify({ 'message': f'Votre [field] : [field_value] existe déjà', 'fields': fields, 'authenticated': False }), 403\n\n #Create account, send email and generate a validation token\n user = User(email = data.get('email').lower(), username = data.get('username'), password = data.get('password'))\n user.validation_token = uuid.uuid4()\n\n db.session.add(user)\n db.session.flush()\n db.session.commit()\n\n msg = send_mail('register', user, data={'pseudo': user.username, 'token': str(user.validation_token), 'linkValider': f'[appUrl]/inscription/{user.validation_token}'})\n\n return jsonify(True)\n\n@api.route('/register/confirm/', methods=('POST',))\ndef confirmation(token):\n user = User.query.filter_by(validation_token=token).first()\n\n try:\n user.validation_token = None\n\n db.session.add(user)\n db.session.flush()\n db.session.commit()\n except Exception as e:\n return jsonify({ 'message': f'Compte non validé', 'authenticated': False }), 403\n\n return jsonify(True)\n\n@api.route('/users', methods=('POST',))\n@token_required\ndef get_user(current_user):\n data = request.get_json()\n\n f = User.query\\\n .join(Friendship, and_(or_(User.id==Friendship.friend_id, User.id==Friendship.user_id), or_(Friendship.friend_id == current_user.id, Friendship.user_id == current_user.id)), isouter=True)\\\n .with_entities(User.username, Friendship.status, Friendship.user_id, User.is_online, User.id, User.avatar)\\\n .filter(User.username == data['username']).first()\n\n return jsonify({'username': f[0], 'relation': f[1], 'user_id': f[2], 'isOnline': f[3], 'uid': f[4], 'avatar': Avatar.query.filter_by(id = f[5]).first().to_dict()})\n\n@api.route('/avatars', methods=('GET',))\n@token_required\ndef get_list_avatars(current_user):\n f = Avatar.query\\\n .join(table('user_avatar').join(User, and_(User.id==column('user_id'), User.id==current_user.id)), Avatar.id == column('avatar_id'), isouter=True)\\\n .with_entities(Avatar.id, Avatar.path, column('avatar_id'))\\\n .all()\n\n return jsonify([{'id': a[0], 'path': current_app.config['APP_URL_BACK'] + f'/static/avatar/{a[1]}', 'isUnlocked': a[2] is not None} for a in f])\n\n@api.route('/avatars', methods=('POST',))\n@token_required\ndef set_avatar(current_user):\n data = request.get_json()\n\n current_user.avatar = data['avatarId']\n\n db.session.flush()\n db.session.commit()\n return jsonify()\n\n@api.route('/users-search', methods=('POST',))\n@token_required\ndef search_user(current_user):\n data = request.get_json()\n\n users = User.query.filter(User.username != current_user.username, func.lower(User.username).like(f'%{data[\"username\"].lower()}%'))\n\n users = users.all()\n\n return jsonify([u.to_dict() for u in users])\n\n@api.route('/friends', methods=('GET',))\n@token_required\ndef get_friends(current_user):\n friends_list = User.query\\\n .join(Friendship, or_(User.id==Friendship.friend_id, User.id==Friendship.user_id))\\\n .with_entities(User.username, Friendship.status, Friendship.user_id, User.is_online, User.avatar)\\\n .filter(User.id != current_user.id, or_(Friendship.user_id==current_user.id, Friendship.friend_id==current_user.id))\\\n 
.order_by(Friendship.created_at)\\\n .all()\n\n return jsonify([{'username': f[0], 'status': f[1], 'user_id': f[2], 'isOnline': f[3], 'avatar': Avatar.query.filter_by(id = f[4]).first().to_dict()} for f in friends_list])\n\n@api.route('/friends/add', methods=('POST',))\n@token_required\ndef add_friends(current_user):\n data = request.get_json()\n friend_invitation = Friendship()\n friend_invitation.user_id = current_user.id\n friend_invitation.friend_id = data['friend_id']\n friend_invitation.status = 'pending'\n\n db.session.add(friend_invitation)\n db.session.flush()\n db.session.commit()\n\n sid = User.query.filter_by(id=data['friend_id']).first().sid\n if sid is not None:\n socketio().emit('NEW_FRIEND_INVITATION', current_user.username, to=sid)\n\n return jsonify()\n\n\n@api.route('/friends', methods=('POST',))\n@token_required\ndef handle_friends_invitation(current_user):\n data = request.get_json()\n friend_invitation = Friendship.query.filter_by(user_id = data['user_id'], friend_id = current_user.id, status = 'pending').first()\n\n if data['accept'] is True:\n friend_invitation.status = 'friends'\n sid = User.query.filter_by(id=data['user_id']).first().sid\n if sid is not None:\n socketio().emit('NEW_FRIEND', current_user.username, to=sid)\n db.session.add(friend_invitation)\n else:\n friend_invitation.status = 'refused'\n db.session.delete(friend_invitation)\n\n db.session.flush()\n db.session.commit()\n\n friends_list = User.query\\\n .join(Friendship, or_(User.id==Friendship.friend_id, User.id==Friendship.user_id))\\\n .with_entities(User.username, Friendship.status, Friendship.user_id, User.is_online)\\\n .filter(User.id != current_user.id, or_(Friendship.user_id==current_user.id, Friendship.friend_id==current_user.id))\\\n .order_by(Friendship.created_at)\\\n .all()\n\n return jsonify([{'username': f[0], 'status': f[1], 'user_id': f[2], 'isOnline': f[3]} for f in friends_list])\n\n\n@api.route('/email/download/', methods=('POST',))\ndef download_email(unique_token):\n email = Email.query.filter_by(unique_token=unique_token).first()\n\n return jsonify(email.message_html)\n\n@api.route('/game/create', methods=('POST',))\n@token_required\ndef create_game(current_user):\n game = Game()\n game.users.append(current_user)\n game.host_id = current_user.id\n\n game.start = randomize_page()\n game.target = randomize_page()\n\n init_clics = dict()\n\n # Todo when start game\n init_clics[current_user.username] = {'clics': 0, 'page': game.start}\n\n game.clics = init_clics\n\n db.session.add(game)\n db.session.flush()\n db.session.commit()\n\n socketio().emit(\"NEW_GAME\", game.to_dict('game'), to='lobby')\n\n response = game.to_dict('game')\n return jsonify(response)\n\n@api.route('/game/join', methods=('POST',))\n@token_required\ndef join_game(current_user):\n game = Game.query.filter_by(id=request.get_json().get('id')).first()\n\n if current_user not in game.users:\n\n game.users.append(current_user)\n\n clics = game.clics\n\n clics[current_user.username] = {'clics': 0, 'page': game.start}\n\n game.clics = clics\n\n db.session.add(game)\n db.session.flush()\n db.session.commit()\n\n response = game.to_dict('game')\n return jsonify(response)\n\n\n@api.route('/game/page/', methods=('GET',))\n@token_required\ndef get_page(current_user, title):\n try:\n game = Game.query.filter(Game.users.contains(current_user), Game.winner == None).first()\n\n game.clics[current_user.username] = {\"clics\": game.clics[current_user.username][\"clics\"] + 1, \"page\": title}\n except Exception as e:\n return 
jsonify({\"message\": str(e)}), 500\n\n if len(request.args):\n title = f'{title}?'\n if 'pagefrom' in request.args.keys():\n title += f'pagefrom={request.args[\"pagefrom\"]}'\n if 'pageuntil' in request.args.keys():\n title += f'pageuntil={request.args[\"pageuntil\"]}'\n\n page = get_wiki_page(title)\n\n room = f'{game.start}_{game.target}'\n\n event = \"PAGE_CHANGED\"\n\n if game.target == urllib.parse.unquote(title).replace(' ', '_'):\n event = \"GAME_FINISHED\"\n game.winner_id = current_user.id\n socketio().emit(\"FINISH_GAME\", game, to='lobby')\n\n db.session.commit()\n\n socketio().emit(event, game.to_dict('game'), to = room)\n return page\n\n\n@api.route('/game/link/', methods=('GET',))\n@token_required\ndef get_summary_page(current_user, title):\n return getSummaryWikiPage(title)\n\n@api.route('/game/launch', methods=('POST',))\n@token_required\ndef launch(current_user):\n try:\n game = Game.query.filter(Game.users.contains(current_user), Game.winner == None).first()\n\n if game.host_id == current_user.id:\n game.is_started = True\n game.started_at = datetime.utcnow()\n\n db.session.commit()\n except Exception as e:\n print(e)\n return jsonify({\"message\": str(e)}), 500\n\n if game.is_started:\n room = f'{game.start}_{game.target}'\n socketio().emit(\"START_GAME\", game.to_dict('game'), to=room)\n socketio().emit(\"GAME_STARTED\", game.to_dict('game'), to='lobby')\n\n return jsonify({'started': True})\n\n return jsonify({'started': False})\n\n\n@api.route('/games', methods=('GET',))\n@token_required\ndef getGames(current_user):\n response = [g.to_dict('game') for g in Game.query.all()]\n\n return jsonify(response)\n\ndef socketio():\n return current_app.extensions['socketio']","sub_path":"server/api/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":12488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"144681239","text":"#100 이하의 자연수중 8의 배수이지만 12의 배수는 아닌 것을 출력하기.\n\na = 8\nwhile a <= 100:\n if a % 8 == 0 and a % 12 != 0 :\n print(a)\n a += 8\n else:\n a += 8 #a += 8 은 반복되는 녀석이니깐 if밖으로 뺐으면 더 좋았을 듯.\n\n#모범답안\ni = 1\nwhile i <= 100:\n if i % 8 == 0 and i % 12 != 0:\n print(i)\n i += 1\n\n'''\n오래전에 제가 강의 만드신 분이 비슷한 질문에 댓글을 남기신 걸 봤습니다.\n\n100% 토시하나까지 똑같진 않지만\n\n강의보다 문제는 어렵게 만들어졌다, 강의보고 바로 풀 수 있는 문제보다는 생각을 많이할 수 있는\n\n문제를 만들었다는 설명을 본 기억이 있습니다.\n\n저도 처음에 할 때는 막 엄청 쉽고 그러진 않았습니다.\n\n며칠씩 고민한 문제도 있었고, 중간에 포기할 뻔한 문제도 있었어요(하노이의 탑 나쁜넘..........)\n\n그런데 포기하지 않고 끝까지 해서 결국 완강했습니다. 그 경험은 단순한 지식습득을 넘어서 제가 지금까지\n\n개발을 계속하고, 또 새로운걸 습득하고 난관들을 해결하는데 큰 도움이 되었습니다.\n\n절대 포기하지만 마세요. 처음이라, 익숙하지 않아서 그러실 겁니다.\n\n최대 해결할 수 있는 시간을 잡아놓고, 그래도 안풀리면 너무 어려우시면 조금씩 커뮤니티에 힌트를 달라고 올려주셔도 됩니당^^\n\n바로 문제 해결을 위해 코딩부터 하지 마시고.\n\n수학 문제 풀듯이 연습장과 펜을 꺼내서 이 과제에서 요구하는 바는 무엇인지, 어떤 처리과정을 거쳐서 어떤 결과를 내야할지 등을 쭉 적으면서 생각해보세요.\n\n꼭 필요한 코드를 적지 않고 한글로 설명하듯이 쓰셔도 됩니다. 그 다음에 이걸 코드로 구현하기 위해 필요한 함수나 구문들을 적용해보는 것이죠^^\n\n퐈이팅!'''","sub_path":"3. 
제어문/_Quiz2_weird mathmatics.py","file_name":"_Quiz2_weird mathmatics.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"594989392","text":"'''\nCreated on 14 Aug 2013\n\n@author: neil\n'''\nimport sys\nimport os\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\")))\nfrom config import fx_config as config\nfrom utils.logger import fx_logger\nfrom utils.pre_processor import PreProcessor\nfrom pymongo import MongoClient\nfrom datetime import datetime\nimport numpy as np\n\nfrom indicators import Calculator, INDICATORS\n\nDATA_NAME = \"GBPCAD_Candlestick_10_m_BID_03.09.2012-15.09.2012.csv\" # hard-coded for now\nDATA_FILE = os.path.join(config.IN_DIR, DATA_NAME)\nCLOSE_INDEX = config.HEADER_INDEX['Close']\nHEADERS = config.HEADERS\nSPREAD = 0.0003\n\nlog = fx_logger(__name__)\nlog.setLevel(\"DEBUG\")\n\n\nclass Market(object):\n '''\n Stores market data for a single currency pair\n Provides access to this data \n (and keeps track of the \"current\" point in the data?)\n '''\n def __init__(self, pair_token, interval):\n '''\n Constructor\n '''\n log.info(\"Creating Market: {0}{1}\".format(pair_token, interval))\n self.currency_pair = pair_token\n self.interval = interval\n self.headers = []\n self.data_file = \"\"\n self.now = 0 # records the point in the data considered 'current' \n\n def __repr__(self):\n out = \"{0}{1} Market\".format(self.currency_pair, self.interval)\n if self.data_file:\n out += \" from {0}\".format(self.data_file)\n return out\n \n def load_data(self, data_file):\n '''\n load data from a file and store it\n '''\n self.data_file = data_file\n log.info(\"Loading Data from {0}\".format(data_file))\n #pre-process the data to turn dates into floats\n prep = PreProcessor(data_file)\n prepfile = prep.process()\n\n #get headers from the first row of the data file\n try:\n with open(prepfile, 'r') as dfile:\n headers = dfile.readline().rstrip().split(config.DATA_SEP)\n except OSError:\n log.error(\"Unable to open data file \" + data_file)\n sys.exit()\n \n if headers != HEADERS:\n log.error(\"Headers {0} do not match the expected (configured) values {1}\".format(\n str(headers), str(config.HEADERS)))\n sys.exit()\n self.headers = headers\n log.debug(\"Headers set: \" + str(self.headers))\n \n # get the data\n self.rowdata = np.loadtxt(prepfile, delimiter=',', skiprows=1)\n self.data = self.rowdata.swapaxes(0,1) \n \n self.info()\n \n def info(self):\n log.info(\"Data are in {0[0]} columns, each with {0[1]} items\".format(self.data.shape))\n log.info(\"Columns are \" + \", \".join(self.headers))\n \n if len(self.headers) != self.data.shape[0]:\n log.error(\"Something is wrong with the data (mismatch with number of headers)!\")\n\n def add_indicator(self, data, name):\n if name not in self.headers: \n self.data = np.vstack((self.data, data))\n self.headers.append(name)\n log.info(\"Added column \" + name + \" to market data\")\n self.info()\n else:\n log.info(\"Column {0} already present in data\".format(name))\n \n def add_all_indicators(self):\n \"\"\"\n problem is that some indicators add more than one column,\n which would need different labels\n could autolabel IND_1, IND_2, etc\n or could configure names in indicators module\n may have to do this anyway to pass in the correct data\n \"\"\"\n log.info(\"Adding available indicators to data\")\n for indicator in INDICATORS:\n pass # self.add_indicator(??, indicator)\n \n\n def 
dump_data(self, fname):\n floc = os.path.join(config.OUT_DIR, fname)\n np.savetxt(floc, self.data, delimiter=',',\n header = ','.join(self.headers),\n comments = '')\n\n def get_column(self, column_name):\n '''\n Return a single column of data \n '''\n try:\n ind = self.headers.index(column_name)\n except IndexError:\n log.error(\"Attempting to return column {0}, which is not contained in the data headers\".format(column_name))\n return\n\n return self.data[ind]\n \n\n def get_ask(self, tick=False):\n \"\"\"\n currently won't use last item of data\n \"\"\"\n ask_price = self.data[CLOSE_INDEX][self.now]\n if tick and not self.tick():\n return None\n \n return ask_price\n\n def get_bid(self, tick=False):\n \"\"\"\n currently won't use last item of data\n \"\"\"\n bid_price = self.data[CLOSE_INDEX][self.now] - SPREAD\n if tick and not self.tick():\n \n return None\n\n return bid_price\n\n def tick(self):\n self.now += 1\n if self.now >= len(self.data[0]) :\n log.warning(\"Reached end of data\")\n return False\n return True\n \n def base(self):\n return self.currency_pair[:3]\n\n def quote(self):\n return self.currency_pair[3:]\n\nclass PerformanceTracker(object):\n '''\n Determines the performance of a market strategy for a given market\n '''\n \n\n\nif __name__ == '__main__':\n market = Market(\"GBPCAD\", \"10m\")\n market.load_data(DATA_FILE)\n \"\"\"\n col_name = 'GBPCAD_10m' \n collection = db[col_name]\n for x in collection.find().sort([('_id', '1')]):\n _id = x['_id']\n counter += 1 \n market.collection.update({'_id': _id}, {'$set': {'DUMMY': 1}})\n print \"TOTAL:\", counter\n axis, data = market.get_indicator('DUMMY')\n print axis\n print data\n \"\"\" \n","sub_path":"src/market.py","file_name":"market.py","file_ext":"py","file_size_in_byte":5737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"87408403","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2013-2014 DNAnexus, Inc.\n#\n# This file is part of dx-toolkit (DNAnexus platform client libraries).\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License. You may obtain a copy\n# of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom __future__ import print_function\n\nimport unittest, time\nfrom dxpy import AppError\nfrom dxpy.utils import (describe, exec_utils, genomic_utils, response_iterator, get_futures_threadpool)\nfrom dxpy.compat import USING_PYTHON2\n\n# TODO: unit tests for dxpy.utils.get_field_from_jbor, get_job_from_jbor, is_job_ref\n\nclass TestDescribe(unittest.TestCase):\n def test_is_job_ref(self):\n # Positive results\n jobref = {\"job\": \"job-B55ZF5kZKQGz1Xxyb5FQ0003\", \"field\": \"number\"}\n self.assertTrue(describe.is_job_ref(jobref))\n jobref = {\"$dnanexus_link\": jobref}\n self.assertTrue(describe.is_job_ref(jobref))\n\n # Negative results\n jobref = {\"job\": \"job-B55ZF5kZKQGz1Xxyb5FQ0003\", \"field\": \"number\", \"other\": \"field\"}\n self.assertFalse(describe.is_job_ref(jobref))\n jobref = {\"job\": \"job-B55ZF5kZKQGz1Xxyb5FQ0003\", \"field\": 32}\n self.assertFalse(describe.is_job_ref(jobref))\n jobref = {\"$dnanexus_link\": jobref}\n self.assertFalse(describe.is_job_ref(jobref))\n jobref = {\"$dnanexus_link\": \"job-B55ZF5kZKQGz1Xxyb5FQ0003\"}\n self.assertFalse(describe.is_job_ref(jobref))\n\n def test_get_resolved_jbors(self):\n resolved_jbors = {}\n orig_thing = {\"job\": \"job-B55ZF5kZKQGz1Xxyb5FQ0003\", \"field\": \"number\"}\n resolved_thing = 32\n describe.get_resolved_jbors(resolved_thing, orig_thing, resolved_jbors)\n self.assertIn(\"job-B55ZF5kZKQGz1Xxyb5FQ0003:number\", resolved_jbors)\n\n resolved_jbors = {}\n orig_thing = {\"$dnanexus_link\": {\"job\": \"job-B55ZF5kZKQGz1Xxyb5FQ0003\", \"field\": \"number\"}}\n resolved_thing = 32\n describe.get_resolved_jbors(resolved_thing, orig_thing, resolved_jbors)\n self.assertIn(\"job-B55ZF5kZKQGz1Xxyb5FQ0003:number\", resolved_jbors)\n\nclass TestErrorSanitizing(unittest.TestCase):\n def test_error_sanitizing(self):\n # ASCII str\n self.assertEqual(exec_utils._safe_unicode(ValueError(\"foo\")), \"foo\")\n # UTF-8 encoded str\n self.assertEqual(exec_utils._safe_unicode(ValueError(u\"crème\".encode(\"utf-8\"))),\n u\"cr\\xe8me\" if USING_PYTHON2 else u\"b'cr\\\\xc3\\\\xa8me'\")\n # Unicode obj\n self.assertEqual(exec_utils._safe_unicode(ValueError(u\"brûlée\")), u\"br\\xfbl\\xe9e\")\n # Not UTF-8\n if USING_PYTHON2:\n expected = \"Invalid read name: D??n?x?s [Raw error message: 496e76616c69642072656164206e616d653a2044d1c16ee878fb73]\"\n else:\n expected = \"b'Invalid read name: D\\\\xd1\\\\xc1n\\\\xe8x\\\\xfbs'\"\n self.assertEqual(exec_utils._safe_unicode(ValueError(u\"Invalid read name: DÑÁnèxûs\".encode(\"ISO-8859-1\"))), expected)\n\n def test_formatting_exceptions(self):\n self.assertEqual(exec_utils._format_exception_message(ValueError(\"foo\")), \"ValueError: foo\")\n self.assertEqual(exec_utils._format_exception_message(AppError(\"foo\")), \"foo\")\n\nclass TestGenomicUtils(unittest.TestCase):\n def test_reverse_complement(self):\n self.assertEqual(b\"TTTTAAACCG\", genomic_utils.reverse_complement(b\"CGGTTTAAAA\"))\n self.assertEqual(b\"TTTTAAACCG\", genomic_utils.reverse_complement(u\"CGGTTTAAAA\"))\n self.assertEqual(b\"TTTTAAACCG\", genomic_utils.reverse_complement(b\"cggtttaaaa\"))\n self.assertEqual(b\"TTTTAAACCG\", genomic_utils.reverse_complement(u\"cggtttaaaa\"))\n self.assertEqual(b\"NNNNNTTTTAAACCG\", genomic_utils.reverse_complement(b\"CGGTTTAAAANNNNN\"))\n self.assertEqual(b\"NNNNNTTTTAAACCG\", genomic_utils.reverse_complement(u\"CGGTTTAAAANNNNN\"))\n with self.assertRaises(ValueError):\n 
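# input containing characters outside the nucleotide alphabet should raise\n            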
genomic_utils.reverse_complement(\"oops\")\n\nclass TestResponseIterator(unittest.TestCase):\n def test_basic_iteration(self):\n def task(i, sleep_for=1):\n print(\"Task\", i, \"sleeping for\", sleep_for)\n time.sleep(sleep_for)\n return i\n\n def tasks():\n for i in range(8):\n yield task, [i], {\"sleep_for\": i/8.0}\n\n for i, res in enumerate(response_iterator(tasks(), get_futures_threadpool(3))):\n self.assertEqual(i, res)\n for i, res in enumerate(response_iterator(tasks(), get_futures_threadpool(5))):\n self.assertEqual(i, res)\n for i, res in enumerate(response_iterator(tasks(), get_futures_threadpool(5), max_active_tasks=2)):\n self.assertEqual(i, res)\n for i, res in enumerate(response_iterator(tasks(), get_futures_threadpool(5), max_active_tasks=6)):\n self.assertEqual(i, res)\n\n def tasks2():\n for i in range(8):\n yield task, [i], {\"sleep_for\": (8-i)/8.0}\n\n for i, res in enumerate(response_iterator(tasks2(), get_futures_threadpool(5), num_retries=2, retry_after=0.1)):\n self.assertEqual(i, res)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"src/python/test/test_dxpy_utils.py","file_name":"test_dxpy_utils.py","file_ext":"py","file_size_in_byte":5582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"449465481","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport sys, os, hashlib, time, json\nfrom lib.log import logger\n\nimport redis\nfrom pyres import ResQ\nfrom pyres import failure\nfrom base64 import b64decode\n\nfrom APNSWrapper import *\nimport binascii\n\ndef print_log(log):\n\tlogger.debug(log + '\\n')\n\ndef do():\n\tjobs=Jobs()\n\tjobs.send()\n\nclass Jobs(object): \n\tdef __init__(self,host='localhost',port=6379):\n\t\tHOST = 'localhost:6379'\n\t\tself.host=host\n\t\tself.port=port\n\t\tself.queue='iOSPush'\n\t\tself.resq = ResQ('%s:%i'%(self.host,self.port))\n\n\tdef push(self,queue,item):\n\t\tself.resq.push(queue,item)\n\n\tdef pop(self,queue):\n\t\treturn self.resq.pop(queue)\n\n\tdef add(self):\n\t\tpass\n\t\n\tdef send(self):\n\t\tinfo=self.pop(self.queue) #will get (None, None) if none\n\t\tinfo={'badge':1,'msg':'您有新的车票信息','token':'cb86b176ee99ae5f3387c79f1226d234599e91c7bc300e97afb034cc0009e192'}\n\t\tdeviceToken = binascii.unhexlify(info['token'])\n\t\tmessage = APNSNotification()\n\t\tmessage.token(deviceToken)\n\t\tmessage.alert(info['msg'])\n\t\tmessage.badge(info['badge'])\n\t\tmessage.sound()\n\t\twrapper = APNSNotificationWrapper('ck.pem', True)\n\t\twrapper.append(message)\n\t\twrapper.notify()\n\nif __name__=='__main__':\n\tdo()\n","sub_path":"dingpiao/ticket_daemon/jobs.py","file_name":"jobs.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"502177464","text":"import datetime\nimport sqlite3\n\n\nclass BookInfo:\n def __init__(self, title, author, price, pub_date):\n self.title = title\n self.author = author\n self.price = price\n self.pub_date = pub_date\n\n @property\n def title(self):\n return self.__title\n\n @title.setter\n def title(self, t):\n self.__title = t\n\n @property\n def author(self):\n return self.__author\n\n @author.setter\n def author(self, a):\n self.__author = a\n\n @property\n def price(self):\n return self.__price\n\n @price.setter\n def price(self, p):\n self.__price = p\n\n @property\n def pub_date(self):\n return self.__pub_date\n\n @pub_date.setter\n def pub_date(self, pub):\n self.__pub_date = pub\n\n def to_formatted(self):\n line = \"書籍名: 
{}\n著者名: {}\n価格: {}円\n発刊日: {}\" \\\n            .format(self.title, self.author, self.price, self.pub_date)\n        return line\n    \n\nclass BookInfoManager:\n    def __init__(self):\n        self.book_info = {}\n        self.conn = sqlite3.connect('book_info.db')\n        self.c = self.conn.cursor()\n        create_table = u'''create table if not exists books(\n                        id integer primary key autoincrement,\n                        registrant varchar(64) not null, \n                        title varchar(64) not null,\n                        author varchar(64) not null,\n                        price integer not null,\n                        pub_date varchar(64) not null);\n                        '''\n        self.c.execute(create_table)\n\n    def add_book(self):\n        info = BookInfo('', '', 0, None)  # placeholder values; every field is assigned below\n        submit = input('登録者: ')\n        title = input('書籍名: ')\n        author = input('著者名: ')\n        price = int(input('価格: '))\n        year = int(input('発刊年: '))\n        month = int(input('発刊月: '))\n        day = int(input('発刊日: '))\n        pub_date = datetime.date(year, month, day)\n        info.title = title\n        info.author = author\n        info.price = price\n        info.pub_date = pub_date\n        self.book_info[submit] = info\n\n        sql = u\"insert into books (id, registrant, title, author, price, pub_date) values (null, ?, ?, ?, ?, ?)\"\n        book = (submit, title, author, price, pub_date)\n        self.c.execute(sql, book)\n        self.conn.commit()\n\n    def input_list(self):\n        print('-----------------------------')\n        for key, value in self.book_info.items():\n            print(value.to_formatted())\n            print('------------------------------')\n\n    def show_database(self):\n        select_sql = u'select * from books'\n        print('-----------------------------')\n        for row in self.c.execute(select_sql):\n            submit, title, author, price, pub_date = row[1:]\n            # build a fresh BookInfo per row; reusing one shared instance would\n            # leave every book_info entry pointing at the last row read\n            info = BookInfo('', '', 0, None)\n            info.title = title\n            info.author = author\n            info.price = int(price)\n            converse_date = datetime.datetime.strptime(pub_date, '%Y-%m-%d')\n            info.pub_date = converse_date\n            self.book_info[submit] = info\n        for key, value in self.book_info.items():\n            print(value.to_formatted())\n        print('-----------------------------')\n\n    def delete_data(self):\n        submit = input('登録者を指定してください: ')\n        sql = u\"delete from books where registrant=?\"\n        self.c.execute(sql, (submit,))\n        self.conn.commit()\n\n    def delete_all(self):\n        yesno = input('全てのデータを削除しますか?(yes/no): ')\n        if yesno == 'yes':\n            sql = u\"delete from books\"\n            self.c.execute(sql)\n            self.conn.commit()\n\n    def run(self):\n        while True:\n            print('1. 蔵書データの登録\\n2. 入力データの確認\\n3. データベースの表示\\n4. データの削除\\n' + \\\n                  '5. データの初期化\\n9. 
終了')\n num = input('番号を選んでください(1, 2, 3, 4, 5, 9): ')\n if num == '1':\n self.add_book()\n elif num == '2':\n self.input_list()\n elif num == '3':\n self.show_database()\n elif num == '4':\n self.delete_data()\n elif num == '5':\n self.delete_all()\n elif num == '9':\n self.conn.close()\n break\n\n\nif __name__ == '__main__':\n manager = BookInfoManager()\n manager.run()\n\n","sub_path":"book_control_system.py","file_name":"book_control_system.py","file_ext":"py","file_size_in_byte":4609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"34881759","text":"#!/usr/bin/env python3\n\n#Description of this file\n\nimport logging\nlogging.basicConfig(level=logging.DEBUG, format=' %(asctime)s - %(levelname)s - %(message)s')\n# logging.basicConfig(filename='log.txt', level=logging.DEBUG, format=' %(asctime)s - %(levelname)s - %(message)s') #This is to output to log.txt file\n# logging.disable(logging.CRITICAL) #Uncomment to disable all logging\n\n\ndef main():\n \"\"\"\n countries = {'CA':'Canada', 'US':'United States', 'MX': 'Mexico'}\n print(countries)\n countries['US'] = 'United States of America'\n print(countries)\n countries['GB'] = 'Great Britain'\n print(countries)\n\n #checking if a key is in the dictionary\n\n code = input('Enter country code:')\n if code in countries:\n print(countries[code])\n else:\n print('There is no country with this code.')\n\n #preventing KeyError with get() method\n print(countries.get('CA'))\n print(countries.get('IE'))\n print(countries.get('IE', 'No country with that code exists in this dictionary.'))\n\n #delete an item from a dictionary\n del countries['CA']\n print(countries)\n code = input('Enter country code to delete:')\n if code in countries:\n print('{} was deleted.'.format(countries[code]))\n del countries[code]\n else:\n print('There is no country with that code in the dictionary.')\n print(countries)\n \"\"\"\n\n \"\"\"\n #use the pop() method to delete an item\n countries = {'CA':'Canada', 'US':'United States', 'MX': 'Mexico'}\n code = input('Enter the code of country to delete.')\n # print(countries.pop(code))\n print(countries.pop(code, 'There is no country with code {} in the dictionary.'.format(code)))\n print(countries)\n \"\"\"\n\n \"\"\"\n #looping through the keys of a dictionary with .keys() method which creates a view object that is an iterator\n countries = {'CA':'Canada', 'US':'United States', 'MX': 'Mexico'}\n for code in countries.keys():\n print('{}: {}'.format(code, countries[code]))\n\n #However, since the keys are the default iterator (view object) for a dictionary, you actually don't need the .keys()\n for code in countries:\n print('{}: {}'.format(code, countries[code]))\n \"\"\"\n\n \"\"\"\n #Use the .items() method to iterate through a loop\n countries = {'CA':'Canada', 'US':'United States', 'MX': 'Mexico'}\n for code, country in countries.items():\n print('{}: {}'.format(code, country))\n\n #Use the .values() method to iterate through a loop\n for country in countries.values():\n print(country)\n\n #Converting keys of a dictionary to a list, and then sorting them\n countries = {'CA':'Canada', 'US':'United States', 'MX': 'Mexico'}\n new_list = list(countries.keys())\n print(new_list)\n new_list.sort()\n print(new_list)\n\n #Converting a two-dimensional list to a dictionary\n countries = [['GB','Great Britain'],\n ['US','United States'],\n ['MX', 'Mexico']] \n print(dict(countries))\n \"\"\"\n \"\"\"\n #A dictionary that contains other dictionaries as values\n employees = 
{\n 'Mark': {'title':'Director of Marketing',\n 'extension':'3245',\n 'division':'Sales and Marketing'},\n 'Fred': {'title':'Quality Assurance Manager',\n 'extension':'7567',\n 'division':'Quality Assurance'},\n 'Sally':{'title':'Vice President of Engineering',\n 'extension':'4803',\n 'division':'Research and Development'}\n }\n\n #get values from embedded dictionaries\n print(employees['Mark']['extension'])\n # print(employees['Sally']['salary']) #KeyError\n key = input('Enter the key in Mark you are interested in:')\n if key in employees['Mark'].keys():\n print('The value is', employees['Mark'][key])\n else:\n print('{} is not found in employees[\\'Mark\\']'.format(key))\n\n #use the .get() method to get values from an embedded dictionary\n print(employees.get('Mark').get('extension'))\n print(employees.get('Mark').get('email'))\n # print(employees.get('John').get('extension')) #AttributeError: 'NoneType' object has no attribute 'get'\n print(employees.get('John', {}).get('extension')) #This works unlike the line above because it returns an empty dictionary from the first .get. Thus, the second .get() has a valid dictionary to work on.\n \"\"\"\n\n #dictionary that contains lists as values\n students = {'Mark':[20, 30, 49],\n 'Fred':[49,20,30],\n 'Jerry':[49, 59, 99]}\n print(students['Mark'])\n print(students['Jerry'][2])\n\n\n\n\n\n\n\nif __name__ == '__main__':\n main()","sub_path":"Wk9_dictionaries.py","file_name":"Wk9_dictionaries.py","file_ext":"py","file_size_in_byte":4770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"297450277","text":"import os\nimport re\nimport sys\nimport urllib.parse, urllib.error\n\nimport constants\n\n\ndef extract_textdocument_pos(definition):\n\turi = ''\n\tif definition.module_path is not None:\n\t\turi = normalize_vsc_uri(definition.module_path)\n\telif definition.name is not None:\n\t\turi = normalize_vsc_uri(definition.name)\n\n\tline = definition.line;\n\tcolumn = definition.column;\n\n\t# Builtins lack line/col and resolvable 'module_path'\n\tif line is None:\n\t\treturn None\n\n\t# One position is used for the start and end of the range because VSC - at the least - works it out to mean the entire line\n\t# Not sure if this is noted and/or buried in protocol.md somewhere, but I've seen it mentioned.\n\tpos = {\n\t\t\"line\": line - 1,\n\t\t\"character\": column\n\t}\n\titem = {\n\t\t\"uri\": uri,\n\t\t\"range\": {\n\t\t\t\"start\": pos,\n\t\t\t\"end\": pos\n\t\t}\n\t}\n\n\treturn item\n\n\ndef get_symbol_kind(definition):\n\tif definition.type is None:\n\t\treturn None\n\n\tif definition.type in constants.jedi_kinds:\n\t\treturn constants.jedi_kinds[definition.type]\n\n\treturn None\n\n\ndef make_error(code, message):\n\terror = {}\n\terror[\"code\"] = code\n\terror[\"message\"] = message\n\treturn error\n\n\ndef echo(str):\n\tsys.stderr.write(str + '\\n')\n\treturn\n\n\ndef sanitize(uri):\n\turi = uri.strip()\n\n\tif uri.startswith('file:///'):\n\t\turi = uri[7:]\n\n\turi = urllib.parse.unquote(uri)\n\turi = uri.replace(\"\\\\\", '/')\n\n\t# Strip leading slash from absolute NTFS-style paths containing a drive letter, i.e. 
'/c:/projects/' => 'c:/projects/''\n\tif uri.startswith('/') and re.match('^\\/\\w:', uri) is not None:\n\t\turi = uri[1:]\n\n\treturn uri\n\ndef normalize_vsc_uri(path):\n\turi = path.replace('\\\\', '/')\n\turi = urllib.parse.quote(uri)\n\n\tif uri.startswith('/'):\n\t\turi = 'file://%s' % uri\n\telse:\n\t\turi = 'file:///%s' % uri\n\n\treturn uri\n","sub_path":"langserver/src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"395368718","text":"from flask import Flask, render_template,request\nimport webbrowser\nimport pyrebase\nimport requests\nfrom bs4 import BeautifulSoup\nfrom fake_useragent import UserAgent\nimport webbrowser\nimport re\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\nfrom nltk import tokenize\n\n\nConfig = {\n \"apiKey\": \"AIzaSyAqGk4Z_2QAE0JSyvVNDkjhRUmrg1apot0\",\n \"authDomain\": \"vsit-2be9f.firebaseapp.com\",\n \"databaseURL\": \"https://vsit-2be9f.firebaseio.com\",\n \"projectId\": \"vsit-2be9f\",\n \"storageBucket\": \"vsit-2be9f.appspot.com\",\n \"messagingSenderId\": \"443551929217\",\n \"appId\": \"1:443551929217:web:83d9e1fe6429f56b5c4e80\",\n \"measurementId\": \"G-2M0SJERF9S\"\n}\nfirbase = pyrebase.initialize_app(Config)\ndb = firbase.database()\n\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n \n return render_template('index.html')\n\n@app.route('/details') \n\ndef details():\n \n place = \"delhi\"#input(\"Enter the name\")\n ua={\"User-Agent\":'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:72.0) Gecko/20100101 Firefox/72.0'}\n \n url = \"https://www.justdial.com/\"+str(place)+\"/Physiatrist-Doctors/nct-11105148\"\n res=requests.get(url,headers=ua)\n soup = BeautifulSoup(res.content,'html.parser')\n list1=[]\n list4=[]\n name = soup.find_all(class_=\"lng_cont_name\")\n available = soup.find_all(class_=\"distnctxt rsrtopn-1\")\n addr=soup.find_all(class_='adWidth cont_sw_addr')\n list2=[]\n list3=[]\n for i in available:\n list4.append(i.get_text()) \n for i in name:\n list2.append(i.get_text())\n for i in range(len(addr)):\n list1.append(addr[i].get_text().strip()) \n urla2=\"https://www.justdial.com/Delhi/Dacc-Near-G3S-Cinema-Rohini/011PXX11-XX11-160607113128-G7M1_BZDET?xid=RGVsaGkgUGh5c2lhdHJpc3QgRG9jdG9ycw==&tab=book-appointment&reqbk=0\"\n urla4=\"https://www.justdial.com/Delhi/Dr-Singh-Near-VD-Honda-Showroom-Dwarka-Sector-7/011PXX11-XX11-180602141117-A4D6_BZDET?xid=RGVsaGkgUGh5c2lhdHJpc3QgRG9jdG9ycw==&tab=book-appointment&reqbk=0\"\n urla3=\"https://www.justdial.com/Delhi/Tulsi-Holistic-Clinic-Near-Chacha-Bhaturewala-Kamla-Nagar/011PXX11-XX11-090620173143-M3V7_BZDET?xid=RGVsaGkgUGh5c2lhdHJpc3QgRG9jdG9ycw==&tab=book-appointment&reqbk=0\"\n urla5=\"https://www.justdial.com/Delhi/Ajay-Clinic-Near-Pili-Kothi-New-Ashok-Nagar/011PXX11-XX11-110822183522-R2W3_BZDET?tab=book-appointment&dept=&stb=2\"\n urla6=\"https://www.justdial.com/Delhi/Dr-Rohit-Sharma-(Goyal-Hospital-Urology-Centre)-Near-Lajpat-Rai-Chowk-Krishna-Nagar/011PXX11-XX11-170804221516-X9J8_BZDET?xid=RGVsaGkgUGh5c2lhdHJpc3QgRG9jdG9ycw==&tab=book-appointment&reqbk=0\"\n c = \"\"\n d= \" \"\n for i in range(0,len(list2)):\n c = list2[i]\n for j in range(0,len(c)):\n if c[j]=='(':\n break\n elif c[j]==\" \":\n d=d+\"-\"\n else:\n d=d+c[j]\n list2[i]=d\n d = \" \"\n list5=[]\n\n list3=list2\n list5.append(urla2)\n list5.append(urla4)\n list5.append(urla3)\n list5.append(urla5)\n list5.append(urla6)\n return 
render_template('Details.html',name=list3,available = list4 , links=list5)\n@app.route('/bot_chat')\n\ndef chat():\n    return render_template('bot.html')\n@app.route('/validate')\n\ndef validate():\n    dict1={}\n    Businesses = db.child(\"VSIT_files\").get()\n    for user in Businesses.each():\n        userid = user.key()\n        dict1 = user.val().copy()\n        inventorydb = db.child(userid).get()\n    list_record=[]\n    db.child(\"VSIT_files\").remove()\n\n    for i in dict1.values():\n        if i!=\"Record\":\n            list_record.append(i)\n\n    list_value=[]\n    for i in range(len(list_record)):\n        if i==0 or i==1 or i==2:\n            continue\n        else:\n            list_value.append(list_record[i])\n    #print(list_value)\n\n    sid = SentimentIntensityAnalyzer()\n\n    # iterate over a snapshot: extending/removing from list_record while\n    # looping over it directly would skip entries\n    for x in list(list_record):\n        if x.count('.')>1:\n            lines_list = tokenize.sent_tokenize(x)\n            list_record.extend(lines_list)\n            list_record.remove(x)\n\n    positivity = 0\n    output=\"You are not depressed. You can still try our games it is fun!\"\n    for x in list_record:\n        ss = sid.polarity_scores(x)\n        positivity+=ss['pos']\n    \n    if positivity <= 2.7:\n        output=\"You seem to be depressed. We have medically certified games and motivational music to make you feel better. Still you can have a consultation by visiting the contact section.\"\n\n    return render_template('valid.html',positivity=positivity ,result=output) \n\nif __name__==\"__main__\":\n    app.run() ","sub_path":"vsit/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"634198323","text":"from flask import Flask, render_template, request, redirect, url_for\r\nimport cv2\r\nimport os\r\nimport pytesseract\r\nfrom googletrans import Translator\r\nfrom playsound import playsound\r\nfrom gtts import gTTS\r\nfrom PIL import Image\r\napp = Flask(__name__)\r\nimg_new=0\r\nx=0\r\n#y=\"\"\r\nlang1=0\r\nlang2=0\r\nlangy1=0\r\nlangy2=0\r\n# get grayscale image\r\ndef get_grayscale(image):\r\n    return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n\r\n#thresholding\r\ndef thresholding(image):\r\n    return cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]\r\n\r\ntext1=''\r\ntext2=''\r\ntext3=''\r\n\r\n@app.route('/', methods=['GET','POST'])\r\ndef dropdown():\r\n    colours1 = ['English', 'Chinese', 'Hindi', 'Telugu', 'Bengali', 'German', 'Russian']\r\n    colours2 = ['English', 'Chinese', 'Hindi', 'Telugu', 'Bengali', 'German', 'Russian']\r\n    global lang1\r\n    global lang2\r\n    global langy1\r\n    global langy2\r\n    if request.method == 'POST':\r\n        if request.form['coloursa']== 'English':\r\n            lang1='eng'\r\n            langy1=\"English\"\r\n        elif request.form['coloursa']== 'Chinese':\r\n            lang1='chi_sim'\r\n            langy1=\"Chinese\"\r\n        elif request.form['coloursa']== 'Hindi':\r\n            lang1='hin'\r\n            langy1=\"Hindi\"\r\n        elif request.form['coloursa']== 'Telugu':\r\n            lang1='tel'\r\n            langy1=\"Telugu\"\r\n        elif request.form['coloursa']== 'Bengali':\r\n            lang1='ben'\r\n            langy1=\"Bengali\"\r\n        elif request.form['coloursa']== 'German':\r\n            lang1='deu'\r\n            langy1=\"German\"\r\n        elif request.form['coloursa']== 'Russian':\r\n            lang1='rus'\r\n            langy1=\"Russian\"\r\n        else:\r\n            pass\r\n        if request.form['coloursb']== 'English':\r\n            lang2='en'\r\n            langy2=\"English\"\r\n        elif request.form['coloursb']== 'Chinese':\r\n            lang2='zh-cn'\r\n            langy2=\"Chinese\"\r\n        elif request.form['coloursb']== 'Hindi':\r\n            lang2='hi'\r\n            langy2=\"Hindi\"\r\n        elif request.form['coloursb']== 'Telugu':\r\n            lang2='te'\r\n            langy2=\"Telugu\"\r\n        elif request.form['coloursb']== 'Bengali':\r\n            lang2='bn'\r\n            
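# langy1/langy2 hold the human-readable language names shown on the result page\r\n            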
langy2=\"Bengali\"\r\n elif request.form['coloursb']== 'German':\r\n lang2='de'\r\n langy2=\"German\"\r\n elif request.form['coloursb']== 'Russian':\r\n lang2='ru'\r\n langy2=\"Russian\"\r\n else:\r\n pass\r\n global text1\r\n global text2\r\n global text3\r\n img_new = cv2.imread(r'C:\\Users\\dwara\\Downloads\\MintyPaper.png')\r\n #img_new = Image.open(r'C:\\Users\\dwara\\Downloads\\MintyPaper.png')\r\n gray = get_grayscale(img_new)\r\n thresh = thresholding(gray)\r\n filename = \"{}.png\".format(os.getpid())\r\n cv2.imwrite(filename, thresh)\r\n text1= pytesseract.image_to_string(Image.open(filename), lang=lang1)\r\n os.remove(filename)\r\n translator = Translator()\r\n text2=translator.translate(text=text1, dest=lang2)\r\n text3=str(text2.text)\r\n text3=text3.strip()\r\n if text3 == \"\":\r\n file = 'MintyPaper.png'\r\n location =r\"C:\\Users\\dwara\\Downloads\"\r\n path = os.path.join(location, file)\r\n os.remove(path)\r\n #return redirect(url_for('output2'))\r\n #global y\r\n #y=\"THIS IMAGE DOES NOT CONTAIN ANY TEXT. PLEASE UPLOAD ANOTHER IMAGE\"\r\n global x\r\n x=1\r\n return redirect(url_for('dropdown'))\r\n file = 'MintyPaper.png'\r\n location =r\"C:\\Users\\dwara\\Downloads\"\r\n path = os.path.join(location, file)\r\n os.remove(path)\r\n \r\n return redirect(url_for('output'))\r\n return render_template('inter1.html', colours1=colours1, colours2=colours2, x=x)\r\n\r\n@app.route('/', methods=['GET', 'POST'])\r\ndef output():\r\n global text1\r\n global text2\r\n global text3\r\n global langy1\r\n global langy2\r\n \r\n #if request.method == 'POST':\r\n # fname='tempv.mp3'\r\n # loc=r\"C:\\Users\\dwara\\Desktop\\dj\\projnew2\"\r\n # myobj = gTTS(text=text1, lang='en', slow=False)\r\n # myobj.save(fname)\r\n # playsound(fname)\r\n # pt=os.path.join(loc, fname)\r\n # os.remove(pt)\r\n if request.method == 'POST':\r\n fname='tempc.mp3'\r\n loc=r\"C:\\Users\\dwara\\Desktop\\dj\\projnew2\"\r\n myobj = gTTS(text=text3, lang='en', slow=False)\r\n myobj.save(fname)\r\n playsound(fname)\r\n pt=os.path.join(loc, fname)\r\n os.remove(pt)\r\n return render_template('inter2.html', text1=text1, text2=text3, langy1=langy1, langy2=langy2)\r\n@app.route('/')\r\ndef output2():\r\n return render_template('inter3.html')\r\n\r\nif __name__ == \"__main__\":\r\n \r\n app.run(debug=True,port=5000,host=\"localhost\")","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"163118036","text":"import glob, os\nimport imageio\n\nd =\".\"\nfor directory in [os.path.join(d, o) for o in os.listdir(d) \n if os.path.isdir(os.path.join(d,o))]:\n images = []\n print(directory)\n s = \"\"\n for filename in sorted(glob.glob(directory+\"/*.png\")):\n s += f\"
<img src='{filename}'/>
\"\n l = directory+'/changes.txt'\n s += f\"\"\n if len(s)!=0:\n with open(f'{directory}.html','w') as f:\n f.write(s)\n\n # print(filename)\n# images.append(imageio.imread(filename,as_gray = True))\n# if images:\n# imageio.mimsave(f'{directory[2:]}.gif', images,duration = 0.3)\n\n# # os.chdir(\"/mydir\")\n\n# # for file in glob.glob(\"*.txt\"):\n","sub_path":"make_gif.py","file_name":"make_gif.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"232972085","text":"import sys\nsys.path.append('../queue_and_stack')\nfrom dll_queue import Queue\nfrom dll_stack import Stack\n\n\nclass BinarySearchTree:\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n\n # Insert the given value into the tree\n def insert(self, value):\n # self.left and/or self.right need to be valid nodes for us to call insert on them.\n if value < self.value:\n # check if self.left node exists\n if self.left:\n # Call insert method on the left side passing in the value\n self.left.insert(value)\n else:\n # If nothing exists on the left side\n # Then assign the value to the left side recursively,\n # We use recursion to cascade through the tree until we find an empty spot.\n self.left = BinarySearchTree(value)\n else:\n if self.right:\n self.right.insert(value)\n else:\n self.right = BinarySearchTree(value)\n\n\n # Return True if the tree contains the value\n # False if it does not\n def contains(self, target):\n if target == self.value: # If target is found, return True\n return True\n else:\n if target > self.value:\n if self.right is None:\n return False\n else:\n return self.right.contains(target)\n else:\n if self.left is None:\n return False\n else:\n return self.left.contains(target)\n\n # Return the maximum value found in the tree\n def get_max(self):\n node = self\n while(node.right is not None):\n node = node.right\n return node.value\n\n # Call the function `cb` on the value of each node\n # You may use a recursive or iterative approach\n def for_each(self, cb):\n node = self\n cb(node.value)\n # Need to iterate over all the nodes in the tree\n if node.left:\n # Execute the callback function on each node we hit \n node.left.for_each(cb)\n if node.right:\n # Execute the callback function on each node we hit \n node.right.for_each(cb)\n\n # DAY 2 Project -----------------------\n\n # Print all the values in order from low to high\n # Hint: Use a recursive, depth first traversal\n def in_order_print(self, node):\n # Check if root has a left node, if so..recursively cascade down that tree printing the value for each node\n if node.left:\n # Recurse left side of BT until their are no more left nodes.\n node.left.in_order_print(node.left)\n # Print the root value after hitting and printing every value < than the root node's value\n print(node.value)\n # Check if root has a right node, if so...recursively cascade down that tree printing the value at each node\n if node.right:\n # Recurse the right side of BT until there are no more right nodes.\n node.right.in_order_print(node.right)\n\n # Print the value of every node, starting with the given node,\n # in an iterative breadth first traversal\n def bft_print(self, node):\n # Initialize a Queue\n data = Queue()\n # Append the root node to the Queue\n data.enqueue(node)\n # Print value of left and right node for every node working our way down.\n while (data.len() > 0):\n # Dequeue the current root and assign to item so we can track where 
we are in the tree\n item = data.dequeue()\n # Print the item we just removed.\n print(item.value)\n # If there is a node to the left of the current node we're on...\n if item.left:\n # Add it to the Queue\n data.enqueue(item.left)\n # If there is a node to the right of the current node we're on...\n if item.right:\n # Add it to the Queue\n data.enqueue(item.right)\n # Keep running while loop until there is nothing left in Queue\n\n # Print the value of every node, starting with the given node,\n # in an iterative depth first traversal\n def dft_print(self, node):\n # Need to initalize a stack (LIFO)\n data = Stack()\n # Add given node to the stack\n data.push(node)\n while (data.len() > 0):\n # pop item off stack and assign to a variable to track location in BST\n item = data.pop()\n print(item.value)\n # Check if left/right nodes exists, if so..\n if item.left:\n # Add them to stack\n data.push(item.left)\n if item.right:\n # Add them to stack\n data.push(item.right)\n\n # STRETCH Goals -------------------------\n # Note: Research may be required\n\n # Print Pre-order recursive DFT\n def pre_order_dft(self, node):\n pass\n\n # Print Post-order recursive DFT\n def post_order_dft(self, node):\n pass\n","sub_path":"binary_search_tree/binary_search_tree.py","file_name":"binary_search_tree.py","file_ext":"py","file_size_in_byte":5118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"644226475","text":"from PIL import Image\nimport sklearn as sk\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nimport numpy as np\n\n\ndef main():\n # On stock les donnees de digits dans une var dataset\n digits_ds = datasets.load_digits()\n\n # Si on veut plus de details\n # print(digits_ds.keys())\n # print(digits_ds.data)\n\n # Initialisation des variables\n compteur = len(digits_ds.data)\n myList = []\n myRes = []\n\n myRes_X = []\n myRes_Y = []\n\n # On recupere les index contenant les 0 et 1\n while compteur > 0 :\n if(digits_ds.target[compteur-1]==0 or digits_ds.target[compteur-1]==1) :\n myList.append(compteur-1)\n compteur -= 1\n\n # print(myList)\n\n # Puis on les associes avec les data dans une liste\n for i in myList :\n myRes.append([digits_ds.data[i], digits_ds.target[i]])\n myRes_X.append(digits_ds.data[i])\n myRes_Y.append(digits_ds.target[i])\n # print(myRes)\n\n # Pour chaques classes on affiche le nombre present dans notre data\n for i in [0,1] :\n # Utilisation du format qui est plus récent, préco de Jeff via les issues...\n # print(\"classe : %s, nb occurences : %s\" % (i, len(digits_ds.target[digits_ds.target == i])))\n print(\"classe : {}, nb occurences : {}\".format(i, len(digits_ds.target[digits_ds.target == i])))\n\n # Test sur le nombre total de donnees dans notre liste de 0 et 1\n #print(len(myRes))\n\n # Separation en listes de test et de train\n x_train, x_test, y_train, y_test = train_test_split(myRes_X, myRes_Y, test_size = 0.33, random_state = 40)\n #print(len(myRes) - len(myRes_test))\n #print(len(myRes_train))\n\n # On attribue les classes pour du binaire\n class0 = [x_train[index] for index, value in enumerate(y_train) if value == 0]\n\n class1 = [x_train[index] for index, value in enumerate(y_train) if value == 1]\n\n # On va attribuer les valeurs pour l'apprentissage du model et faire la regression logistique dessus\n value = [0] * len(class0) + [1] * len(class1)\n learn = class0 + class1\n 
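# train a single binary logistic-regression classifier on the class-0 vs class-1 split\n    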
o_vs_o_classifiers = LogisticRegression(solver='lbfgs').fit(learn, value)\n\n # On utilise les jdd de test pour prédire le modèle\n test_values = [(x_test[index],value) for index, value in enumerate(y_test)]\n\n for elem in test_values:\n result = o_vs_o_classifiers.predict([elem[0]])\n print(\"Resultat : \", result)\n print(\"Attendu :\", elem[1])\n if (elem[1]==result):\n print(\"OK\")\n else:\n print(\"NOK\")\n\n # Code récupéré de Nicolas Goureau pour son explication du One Vs Rest (All)\n # def generateOvRClassifier(classes):\n # o_vs_r_classifiers = {}\n # for elem in classes:\n # class_valid = [x_train[index] for index, value in enumerate(y_train) if value == elem]\n # class_invalid = [x_train[index] for index, value in enumerate(y_train) if value != elem]\n # value = [1] * len(class_valid) + [0] * len(class_invalid)\n # learn = class_valid + class_invalid\n # o_vs_r_classifiers[\"%d_rest\" % elem] = LogisticRegression(multi_class='ovr',solver='lbfgs').fit(learn, value)\n # return o_vs_r_classifiers\n\n\n # def predictOVR(test_values, o_vs_r_classifiers):\n # results = {}\n # i=0\n # for elem in test_values:\n # intern_result = {}\n # for name, classifier in o_vs_r_classifiers.items():\n # result = classifier.predict([elem[0]])\n # result_proba = classifier.predict_proba([elem[0]])\n # intern_result[name.split('_')[0]] = result_proba[0][1]\n # results[i] = intern_result\n # i+=1\n # correct = 0\n # for key, elem in results.items():\n # predicted = max(elem.items(), key=operator.itemgetter(1))[0]\n # value = test_values[key][1]\n # if int(predicted) == value:\n # correct +=1\n # #print(\"Predicted %s and value was %s\" %(predicted,value))\n # prct = (correct/len(results)*100)\n # print(f\"The One versus Rest score a {prct} % precision score \")\n\n # # One vs Rest/All\n # OvAClassifier = generateOvRClassifier(set(digits_ds['target']))\n # predictOVR(test_values, OvAClassifier)\n\nif __name__ == \"__main__\":\n\tmain()","sub_path":"TP 03 - Regression Logistique/regression_logistic.py","file_name":"regression_logistic.py","file_ext":"py","file_size_in_byte":4415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"347521024","text":"import pandas as pd\n\nTITLE_NAME = \"Auto List\" # The name to display\nSOURCE_NAME = \"auto_list\" # The name to be accessed by other code\nLABELS = [\"Team\",\n \"Match\",\n \"Total Success\",\n \"Total Attempt and Success\",\n \"Scale Success\",\n \"Switch Success\",\n \"First Time\",\n \"Last Time\",\n \"Action 1\",\n \"Action 2\",\n \"Action 3\",\n \"Action 4\",\n \"Action 5\"\n ] # Column labels for table, and row labels for lookup (later thing)\n\n\ndef get_rows(manager):\n auto_data_points = [\"Auto scale\", \"Auto switch\", \"Auto scale attempt\", \"Auto switch attempt\"]\n for entry in manager.entries:\n if not entry.board.alliance() == \"N\":\n times = {}\n for i in auto_data_points:\n times[i] = []\n\n actions = []\n for data_point in auto_data_points:\n for occurrence_time in entry.look(data_point):\n times[data_point].append(occurrence_time)\n actions.append((occurrence_time, data_point))\n\n if not actions:\n continue\n\n actions = sorted(actions, key=lambda x: (x[0]))\n\n num_actions = len(actions)\n action_list = []\n for i in range(5):\n if i < num_actions:\n action_list.append(actions[i][1])\n else:\n action_list.append(\"None\")\n switch_auto_successes = entry.count(\"Auto switch\")\n scale_auto_successes = entry.count(\"Auto scale\")\n switch_auto_attempts = entry.count(\"Auto switch 
attempt\")\n            scale_auto_attempts = entry.count(\"Auto scale attempt\")\n            row_data = {\n                \"Team\": entry.team,\n                \"Match\": entry.match,\n                \"Total Success\": switch_auto_successes + scale_auto_successes,\n                \"Total Attempt and Success\": (switch_auto_successes + switch_auto_attempts +\n                                              scale_auto_successes + scale_auto_attempts),\n                \"Scale Success\": scale_auto_successes,\n                \"Switch Success\": switch_auto_successes,\n                \"First Time\": actions[0][0] if num_actions > 0 else 0,\n                \"Last Time\": actions[-1][0] if num_actions > 0 else 0,\n                \"Action 1\": action_list[0],\n                \"Action 2\": action_list[1],\n                \"Action 3\": action_list[2],\n                \"Action 4\": action_list[3],\n                \"Action 5\": action_list[4]\n            }\n            yield row_data\n\n\ndef compute_table(manager):\n    table = pd.DataFrame(get_rows(manager))[LABELS]\n    return table\n","sub_path":"tools/kbpy/adapters/analysis18/auto_list.py","file_name":"auto_list.py","file_ext":"py","file_size_in_byte":2701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"288779843","text":"# source: https://github.com/jfilter/clean-text/\n\nimport re\n\nCURRENCIES = {\n    \"$\": \"USD\",\n    \"zł\": \"PLN\",\n    \"£\": \"GBP\",\n    \"¥\": \"JPY\",\n    \"฿\": \"THB\",\n    \"₡\": \"CRC\",\n    \"₦\": \"NGN\",\n    \"₩\": \"KRW\",\n    \"₪\": \"ILS\",\n    \"₫\": \"VND\",\n    \"€\": \"EUR\",\n    \"₱\": \"PHP\",\n    \"₲\": \"PYG\",\n    \"₴\": \"UAH\",\n    \"₹\": \"INR\",\n}\n\nCURRENCY_REGEX = re.compile(\n    \"({})+\".format(\"|\".join(re.escape(c) for c in CURRENCIES.keys()))\n)\n\n\ndef replace_currency_symbols(inStr: str, opts: dict):\n    \"\"\"\n    Replace all currency symbols in ``inStr`` with the token carried in ``opts``.\n    Args:\n        inStr (str): raw text\n        opts (dict): ``opts['token']`` is the replacement; if None (default),\n            symbols are replaced with their standard 3-letter abbreviations\n            (e.g. '$' with 'USD', '£' with 'GBP'); otherwise every symbol is\n            replaced with the token itself (e.g. \"*CURRENCY*\")\n    Returns:\n        str\n    \"\"\"\n    token = opts['token']\n    \n    if token is None:\n        # CURRENCIES is defined at the top of this module; the upstream\n        # clean-text project keeps it in a constants module that is never\n        # imported here, so referencing constants.CURRENCIES raised NameError\n        for k, v in CURRENCIES.items():\n            inStr = inStr.replace(k, v)\n        return inStr\n    else:\n        return CURRENCY_REGEX.sub(token, inStr)","sub_path":"app_scripts/clean/replace_currency/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"193138903","text":"\n\n'''\nWrite a homeScreen Method\nPrints out prompts for the user: Open Account, Load Account, Deposit, withdraw, exit, help\nWrite out if/else structure for responses\n\n\nRANDOM NUMBER GUESSER GAME\n-Generate a random number between 1 and 20 and store it in a variable\nAsk the user to guess the number\nSay whether the number is too high or too low after each guess\nMaximum of 5 guesses\n\n\n'''\nclass ATM:\n    \n    def homeScreen(self):\n        print(\"**************************\")\n        print(\"Welcome to the ATM!\")\n        option = input(\"1 - Open Account 2 - Load Account 3 - Deposit 4 - Withdraw E - Exit H - Help: \")\n        #print(\"**************************\")\n        if option == \"1\":\n            name = input(\"What is your name?: \")\n            city = input(\"What city do you live in?: \")\n            checking = input(\"Do you want C-hecking or S-avings?: \")\n            while checking != \"C\" and checking != \"S\":\n                checking = input(\"Do you want C-hecking or S-avings?: \")\n            isChecking = True\n            if checking == \"S\":\n                isChecking = False\n            self.accountList.append(Account(name,city,isChecking))\n            print(\"Account successfully added!\")\n            return self.homeScreen()\n        elif option == \"2\":\n            if len(self.accountList) == 0:\n                print(\"There are no accounts to load, try creating one!\")\n                return self.homeScreen()\n            else:\n                self.loadAccount()\n                return self.homeScreen()\n        elif option == \"3\":\n            if self.currentAccount == None:\n                print(\"No Account Loaded\")\n                return self.homeScreen()\n            else:\n                self.processDeposit()\n        elif option == \"4\":\n            if self.currentAccount == None:\n                print(\"No Account Loaded\")\n                return self.homeScreen()\n            else:\n                self.processWithdraw()\n        elif option == \"H\":\n            print(\"First, you need to create an account, Then you need to load an account, and then, you can do stuff with it!\")\n            return self.homeScreen()\n        else:\n            print(\"Toodles!!!\")\n\n    def processDeposit(self):\n        depositAmt = int(input(\"How much do you want to deposit?: \"))\n        if depositAmt < 0:\n            print(\"That's called a withdraw silly!\")\n            return self.processDeposit()\n        elif depositAmt == 0:\n            print(\"You're no closer to a million dollars!\")\n            return self.processDeposit()\n        else:\n            self.currentAccount.deposit(depositAmt)\n            print(\"New Balance: \"+str(self.currentAccount.getCurrentBalance()))\n            return self.homeScreen()\n\n    def processWithdraw(self):\n        withdrawAmt = input(\"How much do you want to withdraw? E to Exit: \")\n        if withdrawAmt == \"E\":\n            return self.homeScreen()\n        else:\n            withdrawAmt = int(withdrawAmt)\n            if withdrawAmt < 0:\n                print(\"That's called a deposit silly!\")\n                return self.processWithdraw()\n            elif withdrawAmt == 0:\n                print(\"Well what's the point??\")\n                return self.processWithdraw()\n            else:\n                if self.currentAccount.withdraw(withdrawAmt) == False:  # withdraw() deducts the amount when it succeeds\n                    print(\"Do you think money grows on trees? 
 You don't have enough!\")\n                    return self.processWithdraw()\n                else:\n                    # the withdraw() call in the condition above already\n                    # deducted the amount, so don't withdraw a second time\n                    print(\"New Balance: \"+str(self.currentAccount.getCurrentBalance()))\n                    return self.homeScreen()\n\n\n\n    '''\n    Get input of name to search\n    If you find an account with that name, load it in to current account\n    IF no account with that name exists, print out an error\n    '''\n\n\n    def loadAccount(self):\n        nameInput = input(\"Enter the name of the account you want to load: \")\n\n        for accts in self.accountList:\n            if accts.name == nameInput:\n                self.currentAccount = accts\n                break\n        \n        if self.currentAccount != None:\n            print(\"Your account has been loaded!\")\n            print(\"Name: \"+self.currentAccount.name)\n            print(\"Balance: \"+str(self.currentAccount.currentBalance))\n            return True\n        else:\n            print(\"No account with that name exists. Please try again\")\n            return self.loadAccount()\n    \n    \n    def __init__(self):\n        self.accountList = []\n        self.currentAccount = None\n\n    ''' \n    Define an ATM Class:\n    Init method should take a single parameter: Bank name,\n    set this as a property, then initialize 2 other properies:\n    accountList - blank array\n    current account - 'None'\n\n    '''\n    '''\n    def addAccount(self,name,city,isChecking):\n        self.accountList.append(Account(name,city,isChecking))\n        return True\n    '''\n\n\n\n\n\n\n\n\n\n\n\n    '''\n    Create an Add Account method - params should include the params you used for your account\n    - use those parameters to append a new account to your accountlist\n\n    '''\n\n    def addAccount(self,nameIn,cityIn,typeOfAccount):\n        self.accountList.append(Account(nameIn,cityIn,typeOfAccount))\n\n    \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n'''\nWrite a class for an account. It should take the following parameters:\n    -name\n    -city\n    -type of account\n\n'''\n\n\nclass Account:\n\n    def __init__(self,name,city,isChecking):\n        self.name = name\n        self.city = city\n        self.isChecking = isChecking\n        self.currentBalance = 0\n\n    def deposit(self,amount):\n        self.currentBalance += amount\n        return True\n\n    def withdraw(self,amount):\n        if self.currentBalance - amount < 0:\n            return False\n        else:\n            self.currentBalance -= amount\n            return True\n\n    def getCurrentBalance(self):\n        return self.currentBalance\n\n\n\n","sub_path":"account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":5276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"404722446","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals, absolute_import\n\nimport logging\nimport re\n\nimport feedparser\nimport requests\n\nfrom dateutil.parser import parse\nfrom flask import g, current_app\n\nfrom udata import theme\nfrom udata.app import cache, nav\nfrom udata.i18n import lazy_gettext as _\n\nlog = logging.getLogger(__name__)\n\nRE_POST_IMG = re.compile(\n    r'<img src=\"(?P<src>.+\.(?:png|jpg))\" .* />(?P<content>.+)')\n\n\ngouvfr_menu = nav.Bar('gouvfr_menu', [\n    nav.Item(_('Discover OpenData'), 'gouvfr.faq', items=[\n        nav.Item(_('As a citizen'), 'gouvfr.faq', {'section': 'citizen'}),\n        nav.Item(_('As a producer'), 'gouvfr.faq', {'section': 'producer'}),\n        nav.Item(_('As a reuser'), 'gouvfr.faq', {'section': 'reuser'}),\n        nav.Item(_('As a developer'), 'gouvfr.faq', {'section': 'developer'}),\n    ]),\n    nav.Item(_('Data'), 'datasets.list', items=[\n        nav.Item(_('Datasets'), 'datasets.list'),\n        nav.Item(_('Reuses'), 'reuses.list'),\n        nav.Item(_('Organizations'), 'organizations.list'),\n    ]),\n    nav.Item(_('Dashboard'), 'site.dashboard'),\n    nav.Item(_('Events'), '#', url='#', items=[\n        
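# external event site: this entry links straight to an absolute URL\n        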
nav.Item('Game of Code', 'gameofcode', url='http://www.gameofcode.eu/'),\n ]),\n])\n\ntheme.menu(gouvfr_menu)\n\nnav.Bar('gouvfr_footer', [\n nav.Item(_('As a citizen'), 'gouvfr.faq', {'section': 'citizen'}),\n nav.Item(_('As a producer'), 'gouvfr.faq', {'section': 'producer'}),\n nav.Item(_('As a reuser'), 'gouvfr.faq', {'section': 'reuser'}),\n nav.Item(_('As a developer'), 'gouvfr.faq', {'section': 'developer'}),\n ])\n\nnav.Bar('gouvfr_footer_support', [\n nav.Item(_('API'), 'apidoc.swaggerui'),\n nav.Item(_('Usage Guidelines for Open Data'), 'gouvfr.usage'),\n nav.Item(_('Terms of use'), 'gouvfr.terms'),\n])\n\n\n@cache.memoize(50)\ndef get_blog_post(lang):\n wp_atom_url = current_app.config.get('WP_ATOM_URL')\n if not wp_atom_url:\n return\n\n for code in lang, current_app.config['DEFAULT_LANGUAGE']:\n feed_url = wp_atom_url.format(lang=code)\n feed = feedparser.parse(feed_url)\n if len(feed['entries']) > 0:\n break\n if len(feed['entries']) <= 0:\n return None\n\n post = feed.entries[0]\n blogpost = {\n 'title': post.title,\n 'link': post.link,\n 'date': parse(post.published)\n }\n match = RE_POST_IMG.match(post.content[0].value)\n if match:\n blogpost.update(image_url=match.group('src'),\n summary=match.group('content'))\n else:\n blogpost['summary'] = post.summary\n return blogpost\n\n\n\n@theme.context('home')\ndef home_context(context):\n context['blogpost'] = get_blog_post(g.lang_code)\n return context\n","sub_path":"udata_gouvfr/theme/theme.py","file_name":"theme.py","file_ext":"py","file_size_in_byte":2731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"396651650","text":"import sys\r\nimport threading\r\nfrom PyQt5.QtWidgets import *\r\nfrom PyQt5.QtCore import *\r\n\r\nclass MainWindow(QMainWindow):\r\n def __init__(self):\r\n super().__init__()\r\n self.initUI()\r\n def initUI(self):\r\n self.setWindowTitle('ProgressBar Example')\r\n self.setGeometry(100, 100, 300, 200)\r\n layout = QVBoxLayout()\r\n layout.addStretch(1)\r\n label = QLabel(\"MIC Test(Pbar)\")\r\n label.setAlignment(Qt.AlignCenter)\r\n font = label.font()\r\n font.setPointSize(30)\r\n label.setFont(font)\r\n self.label = label\r\n layout.addWidget(label)\r\n layout.addStretch(1)\r\n \r\n \r\n #1-stop\r\n self.btn = QPushButton(\"User Button\")\r\n self.btn.clicked.connect(self.doAction)\r\n \r\n # progress bar\r\n self.bar = QProgressBar(self)\r\n self.bar.setValue(0)\r\n\r\n #timer\r\n self.timer = QBasicTimer()\r\n self.step = 0\r\n\r\n layout.addWidget(self.btn)\r\n \r\n centralWidget = QWidget()\r\n centralWidget.setLayout(layout)\r\n self.setCentralWidget(centralWidget)\r\n \r\n def onBtnCamClicked(self):\r\n self.bar.setValue(20)\r\n \r\n def doAction(self):\r\n if self.timer.isActive():\r\n self.timer.stop()\r\n self.btn.setText('Start')\r\n else:\r\n self.timer.start(100,self)\r\n self.btn.setText('Stop')\r\n\r\n def timerEvent(self,e):\r\n if self.step >= 100:\r\n self.timer.stop()\r\n self.btn.setText('Finished')\r\n return\r\n \r\n self.step = self.step + 1\r\n self.bar.setValue(self.step)\r\n \r\n def show(self):\r\n super().show()\r\n \r\n \r\nif __name__ == \"__main__\":\r\n app = QApplication(sys.argv)\r\n win = MainWindow()\r\n win.show()\r\n sys.exit(app.exec_())\r\n print(\"hi\")","sub_path":"lib/progressbar_test/pbar02_timeEvent.py","file_name":"pbar02_timeEvent.py","file_ext":"py","file_size_in_byte":1876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} 
+{"seq_id":"15845064","text":"import os\nimport time\nimport sys\nfrom telegram.ext import Updater, CommandHandler, InlineQueryHandler, MessageHandler, Filters\nimport telegram\nfrom utils import ping\n\n\ndef start(bot, update):\n update.message.reply_text(\"I'm at your service.\")\n\n\ndef hello(bot, update):\n update.message.reply_text(\n 'Hello {}'.format(update.message.from_user.first_name))\n\n\ndef pistatus(bot, update, args=None):\n lib = {\"Pi\": {\"url\": \"admin.bramboomen.nl\", \"name\": \"Pi Server\"},\n \"DLNA\": {\"url\": \"http://192.168.178.9:8200\", \"name\": \"MiniDLNA\"},\n \"Calibre\": {\"url\": \"http://192.168.178.9:8080\", \"name\": \"Calibre Server\"}}\n if not args:\n status = lib\n elif lib.get(args[0]):\n status = {args[0]: lib.get(args[0])}\n else:\n status = {}\n\n if status:\n for l in status.values():\n botping(l[\"url\"], l[\"name\"], bot, update)\n else:\n update.message.reply_text(\"I\\'m running no such service, I\\'m afraid.\")\n\n\ndef botping(url, name, bot, update):\n if ping(url) == 200:\n bot.send_message(chat_id=update.message.chat_id,\n text=\"%s is *UP*\" % name,\n parse_mode=telegram.ParseMode.MARKDOWN)\n else:\n bot.send_message(chat_id=update.message.chat_id,\n text=\"%s is *DOWN*\" % name,\n parse_mode=telegram.ParseMode.MARKDOWN)\n\n\ndef unknown(bot, update):\n bot.send_message(chat_id=update.message.chat_id, text=\"Sorry, I don't understand.\")\n\n\ndef restart(bot, update):\n bot.send_message(update.message.chat_id, \"I will be right back.\")\n time.sleep(0.2)\n os.execl(sys.executable, sys.executable, *sys.argv)\n\n\ndef run():\n updater = Updater('437267480:AAEyg4lc01vpvpZ-9A2zKj0cBqOF94KE8-s')\n\n updater.dispatcher.add_handler(CommandHandler('start', start))\n updater.dispatcher.add_handler(CommandHandler('hello', hello))\n updater.dispatcher.add_handler(CommandHandler('restart', restart))\n updater.dispatcher.add_handler(CommandHandler('pistatus', pistatus, pass_args=True))\n updater.dispatcher.add_handler(MessageHandler(Filters.text, unknown))\n updater.dispatcher.add_handler(MessageHandler(Filters.command, unknown))\n\n print(\"*** Albert is at your service ***\")\n updater.start_polling()\n updater.idle()\n\n\nif __name__ == '__main__':\n run()\n","sub_path":"albert.py","file_name":"albert.py","file_ext":"py","file_size_in_byte":2350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"579335741","text":"num = []\nwhile True:\n numadd = int(input('Digite um número para adicionar a lsita: '))\n if numadd not in num:\n num.append(numadd)\n print('Valor adicionado com sucesso...')\n else:\n print('Esse número ja existe na lista !')\n op = str(input('Gostaria de continuar ? [S/N]')).upper().strip()\n while op not in 'SN':\n print('Opção incorreta tente novamente')\n op = str(input('Gostaria de continuar ? 
[S/N]')).upper().strip()\n    if op == 'N':\n        break\nprint('=' * 80)\nnum.sort()\nprint(f'Os números digitados em ordem crescente foram: {num}', end='')\n","sub_path":"ex079.py","file_name":"ex079.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"318962543","text":"from tools.load import LoadMatrix\nlm=LoadMatrix()\n\ntraindat = lm.load_numbers('../data/fm_train_real.dat')\ntestdat = lm.load_numbers('../data/fm_test_real.dat')\n\nparameter_list = [[traindat,testdat,10,1.2,1.3],[traindat,testdat,10,1.2,1.3]]\n\ndef kernel_sigmoid_modular(fm_train_real=traindat,fm_test_real=testdat,size_cache=10,gamma=1.2,coef0=1.3):\n\n\tfrom shogun.Features import RealFeatures\n\tfrom shogun.Kernel import SigmoidKernel\n\n\tfeats_train=RealFeatures(fm_train_real)\n\tfeats_test=RealFeatures(fm_test_real)\n\t\n\n\tkernel=SigmoidKernel(feats_train, feats_train, size_cache, gamma, coef0)\n\tkm_train=kernel.get_kernel_matrix()\n\n\tkernel.init(feats_train, feats_test)\n\tkm_test=kernel.get_kernel_matrix()\n\treturn km_train,km_test,kernel\n\t\nif __name__=='__main__':\n\tprint('Sigmoid')\n\tkernel_sigmoid_modular(*parameter_list[0])\n","sub_path":"build/shogun_lib/examples/undocumented/python_modular/kernel_sigmoid_modular.py","file_name":"kernel_sigmoid_modular.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"493454757","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport json\nimport logging\nimport settings\nimport telepot\nfrom telepot.aio.loop import MessageLoop\nimport asyncio\nfrom telepot.aio.delegate import pave_event_space, per_chat_id, create_open\nfrom flask import Flask\nimport requests,random\nfrom database.redis import RedisClient\nimport concurrent.futures  # ThreadPoolExecutor lives in the futures submodule\n\napp = Flask(__name__)\nredis=RedisClient.get_redis()\ncache_prefix=\"limit_cache_\"\nloop = asyncio.get_event_loop()\n\nclass MessageHandler(telepot.aio.helper.ChatHandler):\n    def __init__(self, *args, **kwargs):\n        super(MessageHandler, self).__init__(*args, **kwargs)\n\n    async def on_chat_message(self, msg):\n        logging.info(\"received message:%s\", msg)\n        try:\n            content_type, chat_type, chat_id = telepot.glance(msg)\n            params = {'content_type': content_type, \"chat_type\": chat_type, \"chat_id\": chat_id, \"msg\": json.dumps(msg)}\n            acceptId = int(chat_id)\n            if checkTimeLimit(chat_id, acceptId):\n                sendMsg(chat_id,chatMsg=params,triedUrl=None)\n            else:\n                await self.sender.sendMessage(chat_id,settings.overvisit)\n        except Exception as e:\n            try:\n                self.close()\n            except Exception as e:\n                logging.error('close chathandler exception')\n            logging.error('receive chat message error')\n\n\n\nbot = telepot.aio.DelegatorBot(settings.token, [\n    pave_event_space()(\n        per_chat_id(), create_open, MessageHandler, timeout=30),\n])\n\n\n\n'''def handle(msg):\n    try:\n        logging.info(\"recieved message:%s\", msg)\n        content_type, chat_type, chat_id = telepot.glance(msg)\n        logging.info(\"after glance message\")\n        params = {'content_type': content_type, \"chat_type\": chat_type, \"chat_id\": chat_id, \"msg\": json.dumps(msg)}\n        sendMsg(chat_id, params,None)\n    except Exception as e:\n        logging.error('handling message error:%s,%s', msg, e)'''\n\n\ndef getChatHanlderServer(triedUrl):\n    handlers = list(settings.HANDLER_SERVERS)\n    if handlers:\n        if triedUrl and handlers.count(triedUrl) > 0:\n            # list.remove() mutates in place and returns None, so choose from\n            # the remaining handlers list itself after dropping the tried URL\n            handlers.remove(triedUrl)\n            if handlers:\n                return random.choice(handlers)\n        else:\n            return random.choice(handlers)\n    return None\n\ndef checkTimeLimit(chat_id,acceptId):\n    allowed=True\n    try:\n        if acceptId > 0:\n            times = redis.get(cache_prefix + str(chat_id))\n            number = 1\n            if times:\n                number = int(times)\n            else:\n                redis.set(cache_prefix + str(chat_id), number,ex=60)\n            if number > 5:\n                allowed = False\n            else:\n                redis.incr(cache_prefix + str(chat_id))\n    except Exception as e:\n        logging.error('get limit number error:%s',e)\n        return True\n    # without this return the function fell off the end and always returned\n    # None, so no message was ever forwarded\n    return allowed\n\n\ndef sendMsg(chat_id,chatMsg,triedUrl):\n    url=getChatHanlderServer(triedUrl)\n    if url:\n        try:\n            triedUrl = url\n            postMsgToServer(url,chatMsg=chatMsg)\n        except Exception as e:\n            logging.error('send message error,url:%s,%s',url,e)\n            try:\n                url = getChatHanlderServer(triedUrl)\n                postMsgToServer(url, chatMsg=chatMsg)\n            except Exception as e:\n                logging.error('send message error again,url:%s,%s', url, e)\n    else:\n        bot.sendMessage(chat_id,'processing!')\n\ndef postMsg(url, chatMsg):\n    return requests.post(url, data = chatMsg)\n\nasync def handle(msg):\n    logging.info(\"received message:%s\", msg)\n    try:\n        content_type, chat_type, chat_id = telepot.glance(msg)\n        params = {'content_type': content_type, \"chat_type\": chat_type, \"chat_id\": chat_id, \"msg\": json.dumps(msg)}\n        acceptId = int(chat_id)\n        if checkTimeLimit(chat_id, acceptId):\n            sendMsg(chat_id, chatMsg=params, triedUrl=None)\n    except Exception as e:\n        # a caught exception is never None, so log it directly\n        logging.error('handling message error:%s', e)\n\ndef postMsgToServer(url,chatMsg):\n    with concurrent.futures.ThreadPoolExecutor(max_workers=300) as executor:\n        future_to_url = {executor.submit(postMsg, \"http://\" + url + \"/chat/msg\", chatMsg)}\n        logging.info(\"send msg success!,%s,%s\", url, chatMsg)\n    #res = requests.post(\"http://\" + url + \"/chat/msg\", data=chatMsg)\n    #if res and res.ok:\n        #logging.info(\"send msg success!,%s,%s\", url, chatMsg)\n\ndef main():\n    logging.basicConfig(filename=settings.LOG_FILE,level=settings.LOG_LEVEL,format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',\n                        datefmt='%a, %d %b %Y %H:%M:%S')\n    #loop.create_task(MessageLoop(bot).run_forever())\n    asyncio.ensure_future(MessageLoop(bot, handle).run_forever(relax=0.01,offset=-1))\n    print('Message Listening ...')\n    loop.run_forever()\n\n\nif __name__ == '__main__':\n    main()","sub_path":"server/BotChatRouteClient.py","file_name":"BotChatRouteClient.py","file_ext":"py","file_size_in_byte":4940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"230004281","text":"import grok\nfrom zope.app.zapi import absoluteURL\nfrom z3c.menu.simple.menu import (ContextMenuItem, GlobalMenuItem,\n                                  TabItem, ActionItem)\n\nclass ContextMenuItem(ContextMenuItem, grok.Viewlet):\n    \"\"\" Viewlet based on a specific context \n    \"\"\"\n    grok.baseclass()\n\n\nclass GlobalMenuItem(GlobalMenuItem, grok.Viewlet):\n    \"\"\"Viewlet based on a specific site\n    \"\"\"\n    grok.baseclass()\n\n\nclass TabItem(TabItem, grok.Viewlet):\n    \"\"\"Viewlet which renders as a Tab Item\n    \"\"\"\n    grok.baseclass()\n\n    @property\n    def url(self):\n        contextURL = absoluteURL(self.context, self.request)\n        return contextURL + '/' + self.viewURL\n\n    def render(self):\n        return self.template()\n\n\nclass ActionItem(ActionItem, grok.Viewlet):\n    \"\"\"Viewlet which renders as an Action Item\n    \"\"\"\n    grok.baseclass()\n\n    @property\n    def url(self):\n        contextURL = absoluteURL(self.context, self.request)\n        return contextURL + '/' + 
self.viewURL\n\n    def render(self):\n        return self.template()\n\n","sub_path":"Sandbox/cklinger/megrok.ootbviewlets/trunk/src/megrok/ootbviewlets/viewlets.py","file_name":"viewlets.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"604511231","text":"# -*-coding:Latin-1 -*\n\nimport os, sys\nimport pygame\nimport globals\n\nfrom gamestates.gameState import GameContext, GameState, MenuState, ActionState\n\ndef main():\n\t\"\"\"Initialize pygame and the screen, build the game context, then run the game loop.\n\n\t\"\"\"\n\tpygame.init()\n\t# globals.init()\n\n\t# define the screen by its size: will be changed later\n\tscreen = pygame.display.set_mode((globals.NB_SQUARES_PER_ROW * globals.SQUARE_SIDE, globals.NB_SQUARES_PER_COL * globals.SQUARE_SIDE))\n\n\t# test game states\n\tgc = GameContext()\n\n\tclock = pygame.time.Clock()\n\twhile 1:\n\t\tgc.handle_events()\n\t\tnext_state = gc.update()\n\t\tgc.render(screen)\n\t\tpygame.display.flip()\n\n\t\tif not next_state == \"keep\":\n\t\t\t# quit if the returned value was null\n\t\t\t# to be replaced by a proper exit state, in progress...\n\t\t\tif next_state == \"exit\":\n\t\t\t\treturn\n\t\t\tgc.change_state(next_state)\n\n\t\tclock.tick(globals.FPS)\n\t\nif __name__ == '__main__': main()\n","sub_path":"source/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"270208049","text":"def say_i_am_top_coder(n):\n    if n == 0: #base case\n        return ''\n    print(\"I'm top coder\") #some logic\n    say_i_am_top_coder(n - 1) #sub-problem - to never go to infinity\n\ndef fact(n):\n    if n <= 1:\n        return 1\n    return n * fact(n - 1)\n\ndef triangle(depth):\n    if depth <= 0:\n        return\n    triangle(depth - 1)\n    print('*' * depth)\n\ndef series_3n_plus1(n):\n    if n == 1:\n        return 1\n    if n % 2 == 0:\n        return 1 + series_3n_plus1(n//2)\n    return 1 + series_3n_plus1(3 * n + 1)\n\ndef sum_digits(n):\n    if n == 0:\n        return 0\n    return n % 10 + sum_digits(n // 10)\n    \ndef find_max(seq , i = 0, maximum = 0):\n\t# Describe a recursive algorithm for finding the maximum element in a sequence,\n\t# S, of n elements.\n\tif i == len(seq):\n\t\treturn maximum\n\tif seq[i] > seq[maximum]:\n\t\treturn find_max(seq, i+1, i)\n\treturn find_max(seq, i+1, maximum)\n\t\ndef power(n, p):\n\t# Draw the recursion trace for the computation of power(2,5), using the\n\t# traditional function implemented.\n\tif p == 0:\n\t\treturn 1\n\treturn n * power(n, p - 1) if (p > 0) else 1/n * power(n, p + 1)\n\ndef reverse(arr):\n\t# Draw the recursion trace for the execution of function reverse(S, 0, 5)\n\t# S = [4, 3, 6, 2, 6]\n\tcurrent_start, current_end = 0, len(arr) - 1\n\treturn _reverse(arr, current_start, current_end)\ndef _reverse(arr, current_start, current_end):\n\tif current_start >= current_end:\n\t\treturn arr\n\tarr[current_start],arr[current_end] = arr[current_end],arr[current_start]\n\treturn _reverse(arr, current_start + 1, current_end - 1)\n\ndef Harmonic_number(n, i = 1):\n\t# Describe a recursive function for computing the nth Harmonic number,\n\t# H_n = sum(1/i for i = 1..n)\n\tif n == i:\n\t\treturn 1/n\n\treturn (1/i) + Harmonic_number(n, i+1)\n\ndef add_couma(n, digits = 0):\n\t# Describe a recursive function for converting a string of digits into the integer\n\t# it represents. 
For example, 13531 represents the integer 13,531.\n\tif n > 0:\n\t\tadd_couma(n // 10, digits + 1)\n\t\tif digits % 3 == 0 and digits != 0:\n\t\t\tprint( str(n%10) + ',' , end = '')\n\t\telse:\n\t\t\tprint(n%10, end = '')\n\treturn ''\n\ndef max_path(m, row, column):\n return _path_(m, 0, 0, row, column)\ndef _path_(m, row, column, lengthr, lengthc):\n if row < lengthr and column < lengthc:\n return m[row][column] + max(_path_(m, row, column + 1, lengthr, lengthc), _path_(m, row + 1, column, lengthr, lengthc))\n return 0\n\n#define a function that returns a boolean if it's can reachable to end or not\ndef find_path(maze, start_point):\n i, j = start_point\n return canpass(maze, i, j, len(maze), len(maze[0]))\ndef canpass(maze, i, j, maze_height, maze_width, visited = {}):\n#check if it's invalid index\n if i < maze_height and j < maze_width and i >= 0 and j >= 0:\n #visited before\n if i not in visited:\n visited[i] = {}\n if j in visited[i]:\n return False\n else:\n visited[i][j] = True\n #wrong way\n if maze[i][j] == 'X':\n return False\n #reached\n if maze[i][j] == 'E':\n return True\n \n if canpass(maze, i, j + 1, maze_height, maze_width, visited): #search right\n return True\n if canpass(maze, i, j - 1, maze_height, maze_width, visited): #search left\n return True\n if canpass(maze, i + 1, j, maze_height, maze_width, visited):#search up\n return True\n if canpass(maze, i - 1, j, maze_height, maze_width, visited):#search down\n return True\n visited[i][j] = False\n return False\n\n# define a function that returns number of pixels can fill form any point\ndef flood_fill(picture, point):\n i, j = point\n return fill(picture, i, j, len(picture), len(picture[0]))\ndef fill(picture, i, j, picture_height, picture_width, visited = {}):\n if i < picture_height and j < picture_width and i >= 0 and j >= 0:\n if i not in visited:\n visited[i] = {}\n if j in visited[i]:\n return 0\n else:\n visited[i][j] = True\n\n if picture[i][j] == 'X':\n return 0\n \n return 1 + fill(picture, i, j + 1, picture_height, picture_width, visited) + fill(picture, i, j - 1, picture_height, picture_width, visited) +fill(picture, i + 1, j, picture_height, picture_width, visited) + fill(picture, i - 1, j, picture_height, picture_width, visited)\n return 0\n\ndef count_sections(picture):\n# to count picture sections that connected\n picture_height, picture_width = len(picture), len(picture[0])\n visited = {}\n def mark_sections(i, j):\n if i < picture_height and j < picture_width and i >= 0 and j >= 0:\n if i not in visited:\n visited[i] = {}\n\n if j in visited[i]:\n return 0\n else:\n visited[i][j] = True\n\n if picture[i][j] == 'X':\n return 0\n\n mark_sections(i, j - 1) #mark the left if it's visited\n mark_sections(i, j + 1) #mark the right if it's visited\n mark_sections(i - 1, j) #mark the up if it's visited\n mark_sections(i + 1, j) #mark the down if it's visited\n return 1\n return 0\n\n count = 0\n for i in range(picture_height):\n for j in range(picture_width):\n count += mark_sections(i, j)\n return count\n\n\n#define a function that returns all sub-sequence of given length n, of 0,1\ndef sub_sequence_of_01(n, binary_number = \"\"):\n if n > 0:\n sub_sequence_of_01(n - 1, binary_number + \"0\")\n sub_sequence_of_01(n - 1, binary_number + \"1\")\n print(binary_number)\n\n#define a function that returns all sub-sequence of given length n, of 0,1, 2\ndef sub_sequence_of_012(n, binary_number = \"\"):\n if n > 0:\n sub_sequence_of_012(n - 1, binary_number + \"0\")\n sub_sequence_of_012(n - 1, binary_number + 
\"1\")\n sub_sequence_of_012(n - 1, binary_number + \"2\")\n print(binary_number)\n\nif __name__ == '__main__':\n # print(triangel(7))\n print(series_3n_plus1(22))\n print(sum_digits(1234567890))\n matrix = [\n [1, 2, 90, 1],\n [8, 8, 8, 3],\n [9, 9, 1, 90]\n ]\n rows, columns = len(matrix), len(matrix[0])\n print(max_path(matrix, rows, columns))\n\n maze = [\n ['.', 'S', 'X', 'X', 'E'],\n ['.', '.', 'X', '.', '.'],\n ['.', '.', '.', '.', 'X'],\n ['X', '.', 'X', 'X', '.']\n ]\n print(find_path(maze, (0, 1)))\n\n panel = [\n ['.', '.', '.', '.', 'X', '.', '.', '.'],\n ['.', '.', '.', '.', 'X', 'X', 'X', 'X'],\n ['.', '.', 'X', '.', 'X', '.', '.', '.'],\n ['.', '.', 'X', '.', 'X', '.', 'X', '.'],\n ['.', '.', 'X', '.', 'X', '.', 'X', '.'],\n ['.', '.', '.', 'X', '.', 'X', 'X', 'X'],\n ]\n print(flood_fill(panel, (0, 0)))\n print(count_sections(panel))\n\n # sub_sequence_of_01(3)\n # sub_sequence_of_012(3)","sub_path":"Sorting and Searching techniques implementation/recursive_training.py","file_name":"recursive_training.py","file_ext":"py","file_size_in_byte":6864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"297542714","text":"import os\r\nimport os.path as osp\r\nimport mmcv\r\nimport argparse\r\nfrom tqdm import tqdm\r\nfrom tools.promptdet.class_names import *\r\n\r\n\r\ndef parse_args():\r\n parser = argparse.ArgumentParser(\r\n description='Convert Laion images of the LVIS novel categories to mmdetection format')\r\n parser.add_argument('--data-path', help='data path') # data_root\r\n parser.add_argument('--out-file', help='output path')\r\n parser.add_argument('--base-ind-file', help='index of the LVIS base categories')\r\n parser.add_argument('--topK', default=300, help='the number of images per catogory')\r\n args = parser.parse_args()\r\n return args\r\n\r\ndef file_filter(f):\r\n if f[-4:] in ['.jpg', '.png', 'bmp']:\r\n return Truemv \r\n else:\r\n return False\r\n\r\ndef main():\r\n args = parse_args()\r\n\r\n data_root = args.data_path\r\n out_file = args.out_file\r\n base_ind_file = args.base_ind_file\r\n topK_images = args.topK\r\n\r\n base_inds = open(base_ind_file, 'r').readline().strip().split(', ')\r\n base_inds = [int(ind) for ind in base_inds]\r\n\r\n annotations = []\r\n images = []\r\n obj_count = 0\r\n img_id = 0\r\n number_class_save = 0\r\n for category_id, dir_name in tqdm(enumerate(LVIS_CLASSES), total=len(LVIS_CLASSES)):\r\n image_prefix = osp.join(data_root, dir_name, '00000')\r\n if category_id in base_inds:\r\n continue\r\n\r\n number_class_save += 1\r\n filenames = os.listdir(image_prefix)\r\n filenames = list(filter(file_filter, filenames))\r\n\r\n filenames = sorted(filenames)[:topK_images]\r\n print(f\"#images of class {dir_name}: {len(filenames)}\")\r\n\r\n for filename in filenames:\r\n img_path = osp.join(image_prefix, filename)\r\n height, width = mmcv.imread(img_path).shape[:2]\r\n\r\n images.append(dict(\r\n id=img_id,\r\n file_name=osp.join(dir_name, '00000', filename),\r\n height=height,\r\n width=width))\r\n\r\n data_anno = dict(\r\n image_id=img_id,\r\n id=obj_count + 1,\r\n category_id=category_id + 1,\r\n bbox=[0, 0, 1, 1], # not used, only for compatibility with mmdetection dataloder\r\n area=1,\r\n iscrowd=0)\r\n annotations.append(data_anno)\r\n\r\n obj_count += 1\r\n img_id += 1\r\n\r\n categories = []\r\n for idx, cls_name in enumerate(LVIS_CLASSES):\r\n categories.append(dict(\r\n id=idx + 1,\r\n name=cls_name\r\n ))\r\n\r\n coco_format_json = dict(\r\n images=images,\r\n 
annotations=annotations,\r\n        categories=categories)\r\n    mmcv.dump(coco_format_json, out_file)\r\n\r\n    print(f'#novel categories: {number_class_save}')\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n","sub_path":"tools/promptdet/gen_laion_novel_dataset.py","file_name":"gen_laion_novel_dataset.py","file_ext":"py","file_size_in_byte":2817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"102739156","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nSelection sort improves on bubble sort by making only one swap per pass through the list.\nTo do this, it finds the largest element and places it in the appropriate position.\nAs with bubble sort, after the first pass the largest element is in the right place.\nAfter the second pass, the next largest element settles into its place.\nThe process continues, requiring n-1 passes to sort n elements, since the last element automatically ends up in its place.\n\"\"\"\n\nfrom algorithms import Algorithms\n\n\nclass SelectionSort(Algorithms):\n\n    @Algorithms.timing\n    def selection_sort(self, data=[]):\n        if data:\n            for i in range(len(data)):\n                min_index = i\n\n                for j in range(i + 1, len(data)):\n                    min_index = j if (data[j] < data[min_index]) else min_index\n\n                if min_index != i:\n                    data[i], data[min_index] = data[min_index], data[i]\n\n        return data\n\n\nif __name__ == \"__main__\":\n    sortData = [3, 4, 2, 5, 45, 67, 21, 15, 24, 17, 1, 95, 18, 72]\n    sortObject = SelectionSort()\n\n    print(sortData)\n    print(sortObject.selection_sort(sortData))\n","sub_path":"algorithms/sort/selectionSort.py","file_name":"selectionSort.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"15451184","text":"import numpy as np\nimport cv2\n\ncap = cv2.VideoCapture(0) \nimg_width = 640\nimg_height = 480\ncap.set(3, img_width)\ncap.set(4, img_height)\n\n# color range\nlower_black = (0,0,0)\nupper_black = (255,255,90)\nlower_white = (0,0,200)\nupper_white = (255,20, 255)\n#kernel_blur = np.ones((5,5),np.float32)/25 \nkernel_morph = np.ones((5,5), np.uint8)\n# cv2.Canny parameters\nthreshold1 = 100\nthreshold2 = 50\n\nmin_target_ratio = 0.05\nmin_triangle_ratio = 0.05\nmax_target_ratio = 0.50\nregion_ratio = 0.25\n\ndef findTriangle(region):\n\tcontours = cv2.findContours(region, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2]\n\tfor c in contours:\n\t\tif cv2.contourArea(c) < region.shape[0]*region.shape[1]*min_triangle_ratio:\n\t\t\tcontinue\n\t\tperimeter = cv2.arcLength(c, True)\n\t\tapprox = cv2.approxPolyDP(c, 0.085 * perimeter, True)\n\t\tprint(len(approx))\n\t\t#edges = cv2.Canny(region, threshold1, threshold2)\n\t\t#if dir == 1:\n\t\t\t#cv2.drawContours(img, [approx], -1, (0,0,255), 5)\n\t\t#cv2.imshow(\"Region\", img);\n\t\t#cv2.waitKey(20)\n\t\tif len(approx) == 2:\n\t\t\treturn True\n\treturn False\n\n\ndef findDirection(img):\n\theight = img.shape[0]\n\twidth = img.shape[1]\n\t\n\ty_upper = 0\n\tx_upper = 0\n\ty_left = 0\n\tx_left = 0\n\ty_lower = int((1-region_ratio)*height)\n\tx_lower = 0\n\ty_right = 0\n\tx_right = int((1-region_ratio)*width)\n\n\tw_horizontal = int(width)\n\th_horizontal = int(region_ratio * height)\n\tw_vertical = int(region_ratio * width)\n\th_vertical = int(height)\n\n\tregion_upper = img[y_upper:y_upper+h_horizontal, x_upper:x_upper+w_horizontal]\n\tregion_left = img[y_left:y_left+h_vertical, x_left:x_left+w_vertical]\n\tregion_lower = img[y_lower:y_lower+h_horizontal, 
x_lower:x_lower+w_horizontal]\n\tregion_right = img[y_right:y_right+h_vertical, x_right:x_right+w_vertical]\n\n\t#cv2.imshow(\"region\", region_right)\n\n\ttriangle_upper = findTriangle(region_upper)\n\ttriangle_left = findTriangle(region_left)\n\ttriangle_lower = findTriangle(region_lower)\n\ttriangle_right = findTriangle(region_right)\n\n\tif triangle_upper :\n\t\tif triangle_left:\n\t\t\tif triangle_lower:\n\t\t\t\tprint(\"Right\")\n\t\t\t\treturn \"Right\"\n\t\t\telif triangle_right:\n\t\t\t\tprint(\"Down\")\n\t\t\t\treturn \"Down\"\n\t\telif triangle_right and triangle_lower:\n\t\t\tprint(\"Left\")\n\t\t\treturn \"Left\"\n\telif triangle_lower and triangle_left and triangle_right:\n\t\tprint(\"Up\")\n\t\treturn \"Up\"\n\telse:\n\t\tprint(\"No arrow\")\n\t\treturn \"No arrow\"\n\n\n\nwhile True:\n\tret,frame = cap.read() \n\t# hsv_max: 255\n\thsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\tmask = cv2.inRange(hsv, lower_black, upper_black)\n\tmask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel_morph)\n\tmask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel_morph)\n\t# find contours\n\tcontours = cv2.findContours(mask.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2]\n\tcontours = sorted(contours, key = cv2.contourArea, reverse=True)\n\t#contours = contours[1]\n\t#contours = sorted(contours, key = cv2.contourArea. reverse = True)\n\t#edges = cv2.Canny(mask, threshold1, threshold2)\n\tcrop_img = None\n\tcrop_img2 = frame\n\tresult_contour = None\n\tx,w,y,h = 0,0,0,0\n\tresult = \"\"\n\n\tfor c in contours:\n\t\tif cv2.contourArea(c) < img_width*img_height*min_target_ratio or cv2.contourArea(c) > img_width*img_height*max_target_ratio:\n\t\t\tcontinue\n\t\tperimeter = cv2.arcLength(c, True)\n\t\tapprox = cv2.approxPolyDP(c, 0.02 * perimeter, True)\n\t\t#print(len(approx))\n\t\tif len(approx) == 7:\n\t\t\tresult_contour = approx\n\t\t\tx,y,w,h = cv2.boundingRect(result_contour)\n\t\t\tcrop_img = mask[y-5:y+h+5, x-5:x+w+5]\n\t\t\tresult = findDirection(crop_img)\n\t\t\tcv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),2)\n\t\t\tbreak\n\n\tcv2.drawContours(frame, [result_contour], -1, (0,0,255), 5)\n\tcv2.putText(frame, result, (x - 20, y - 20), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2, cv2.LINE_AA)\n\tcv2.imshow(\"Frame\", frame)\n\n\tif cv2.waitKey(50) == 27:\n\t\tbreak\n\t\n#print(up, down, left, right)\ncv2.destroyAllWindows()\ncap.release()\n\n\n\n\n\n\n\n\n","sub_path":"InternalTestModules/detection_demo_on_PC.py","file_name":"detection_demo_on_PC.py","file_ext":"py","file_size_in_byte":3817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"127883680","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy_json.settings import TYPES\nfrom scrapy_json.items import wubatongcheng_Item\nimport time\nimport random\n\nclass WubatongchengSpider(scrapy.Spider):\n name = 'wubatongcheng'\n allowed_domains = ['58.com']\n\n def start_requests(self):\n for key,value in TYPES.items():\n url = 'https://bj.58.com/{}'.format(value)\n page_num = 1\n yield scrapy.Request(url=url,callback=self.parse,meta={'type':key,'value':value,'page_num':page_num},encoding='utf-8',dont_filter=True)\n\n def parse(self, response):\n curr_page = response.meta['page_num']\n try:\n total_page = response.xpath('//span[@class=\"num_operate\"]/i/text()').extract()[0]\n total_page = int(total_page)\n except:\n total_page = 1\n if curr_page <= total_page:\n try:\n jobs = response.xpath('//ul[@id=\"list_con\"]/li')\n except:\n jobs = []\n for job in jobs:\n item = 
wubatongcheng_Item()\n item['type'] = response.meta['type']\n try:\n item['name'] = job.xpath('.//span[@class=\"name\"]/text()').extract()[0]\n except:\n item['name'] = 'null'\n item['city'] = '北京'\n try:\n item['address'] = job.xpath('.//span[@class=\"address\"]/text()').extract()[0].replace(' ','')\n except:\n item['address'] = 'null'\n try:\n item['job'] = job.xpath('.//span[@class=\"cate\"]/text()').extract()[0]\n except:\n item['job'] = 'null'\n try:\n item['salary'] = job.xpath('.//p[@class=\"job_salary\"]/text()').extract()[0]\n except:\n item['salary'] = 'null'\n try:\n item['tip'] = '/'.join(job.xpath('.//div[@class=\"job_wel clearfix\"]/span/text()').extract())\n except:\n item['tip'] = 'null'\n try:\n item['edu'] = job.xpath('.//span[@class=\"xueli\"]/text()').extract()[0]\n except:\n item['edu'] = 'null'\n try:\n item['exp'] = job.xpath('.//span[@class=\"jingyan\"]/text()').extract()[0]\n except:\n item['exp'] = 'null'\n try:\n item['comp'] = job.xpath('.//div[@class=\"comp_name\"]/a/@title').extract()[0]\n except:\n item['comp'] = 'null'\n yield item\n print(response.meta['type']+str(curr_page))\n time.sleep(random.uniform(1,2))\n curr_page += 1\n if curr_page <= total_page:\n yield scrapy.Request(url='https://bj.58.com/{}/pn{}'.format(response.meta['value'],curr_page), callback=self.parse,\n meta={'type':response.meta['type'],'value':response.meta['value'],'page_num': curr_page},\n encoding='utf-8', dont_filter=True)\n\n\n\n\n","sub_path":"Scrapy框架/scrapy_json/scrapy_json/spiders/wubatongcheng.py","file_name":"wubatongcheng.py","file_ext":"py","file_size_in_byte":3162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"132200822","text":"from TrainUtils import *\nfrom Losses import Losses\nfrom DataLoader import HSDataLoader\nfrom Models import *\nfrom DataTransforms import Noise\n\nimport torch\n\n\nclass Training(Losses):\n\n def __init__(self, logger=None, **args):\n\n super().__init__(**args)\n self.params = args['params']\n if logger is None:\n logging.basicConfig(format='%(asctime)s %(message)s')\n self.logger = logging.getLogger(__name__)\n self.logger.setLevel(logging.DEBUG)\n else:\n self.logger = logger\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n self.model = None\n self.architecture_params = self.params['architecture_params']\n self.get_model()\n self.model_weights_save_dir = self.params['model_weights_save_dir']\n self.model_name = self.params['model_name']\n self.losses = self.params['losses']\n self.metrics = self.params['metrics']\n self.train_steps = self.params['train_steps'] if 'train_steps' in self.params.keys() else None\n self.batch_size = self.params['batch_size']\n self.epochs = self.params['epochs']\n\n self.data_loaders = HSDataLoader(self.params, logger=self.logger, mode='train')\n\n self.checkpoints = self.params['checkpoints']\n self.display_on = False\n\n @staticmethod\n def weights_init(m):\n if isinstance(m, nn.Conv2d):\n torch.nn.init.xavier_uniform(m.weight)\n if m.bias is not None:\n torch.nn.init.zeros_(m.bias)\n\n def get_model(self):\n\n self.model = self.params['model_type'](self.params['architecture_params'])\n if self.params['train_weights_path'] is not None:\n state_dict = torch.load(self.params['train_weights_path'])\n state_dict = fix_state_dict(state_dict)\n self.model.load_state_dict(state_dict)\n else:\n self.model.apply(self.weights_init)\n if torch.cuda.device_count() > 1:\n self.model = MultiGpuModel(self.model)\n self.logger.info('Using 
multi GPU mode with {} GPUS'.format(torch.cuda.device_count()))\n self.model = self.model.to(self.device)\n return\n\n def training_scheme(self, optimizer, scheduler, data_loaders, add_noise=False):\n noise = Noise(['Gaussian', 'Quantization'], sigma=1e-3, bits=10\n )\n\n best_model_weights = copy.deepcopy(self.model.state_dict())\n best_loss = 1e10\n\n epochs_losses = defaultdict(list)\n\n if not len(data_loaders['val']):\n test_phase = 'train'\n test_loss = 'loss_train'\n else:\n test_phase = 'val'\n test_loss = 'loss_val'\n for epoch in range(1, self.epochs + 1):\n self.logger.info('Epoch %s/%s', epoch, self.epochs)\n\n since = time.time()\n\n # Each epoch has a training and validation phase\n for phase in ['train', 'val']:\n if phase == 'train':\n self.model.train() # Set model to training mode\n else:\n self.model.eval() # Set model to evaluate mode\n\n metrics = defaultdict(float)\n epoch_samples = 0\n\n steps = len(data_loaders[phase]) if self.train_steps is None or phase != 'train' else\\\n min(self.train_steps, len(data_loaders[phase]))\n if phase == 'val' and not steps: # If no validation samples, skip validation phase.\n continue\n\n with tqdm(total=steps) as progress:\n step = 0\n for inputs, labels, _ in data_loaders[phase]:\n if step > steps and phase == 'train':\n break\n inputs = list(map(lambda p: p.to(self.device), inputs))\n\n if phase == 'train' and add_noise:\n inputs = noise(inputs)\n labels = labels.to(self.device)\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward\n # track history if only in train\n with torch.set_grad_enabled(phase == 'train'):\n outputs = self.model(inputs)\n loss = self.calc_loss(outputs, labels, metrics)\n\n # backward + optimize only if in training phase\n if phase == 'train':\n loss.backward()\n optimizer.step()\n\n # statistics\n epoch_samples += inputs[0].size(0)\n progress.update()\n step += 1\n if phase == 'train':\n scheduler.step()\n for param_group in optimizer.param_groups:\n self.logger.info('LR: %s', param_group['lr'])\n\n epoch_losses = defaultdict(float)\n print_metrics(metrics, epoch_samples, phase, self.logger)\n\n for metric in metrics.keys():\n\n epoch_losses[metric + '_' + phase] = metrics[metric] / epoch_samples\n epochs_losses[metric + '_' + phase].append(metrics[metric] / epoch_samples)\n epochs_losses['loss' + '_' + phase].append(metrics['loss'] / epoch_samples)\n\n # deep copy the model\n if phase == test_phase and epoch_losses[test_loss] < best_loss:\n self.logger.info(\"saving current model as best model\")\n best_loss = epoch_losses[test_loss]\n best_model_weights = copy.deepcopy(self.model.state_dict())\n\n if self.checkpoints:\n if phase == 'train' and not np.mod(epoch, self.params['checkpoints']):\n temp_weights_file = os.path.join(self.model_weights_save_dir,\n self.model_name + '_{epochs}_epochs_weights.pt'.format(\n epochs=epoch))\n self.logger.info('Saving checkpoints weights: %s', temp_weights_file)\n torch.save(self.model.state_dict(), temp_weights_file)\n\n time_elapsed = time.time() - since\n self.logger.info('Elapsed time: %.0fm %.0fs', time_elapsed // 60, time_elapsed % 60)\n\n if self.display_on:\n show_loss_statistics(epochs_losses, valid=bool(len(data_loaders['val'])))\n\n self.logger.info('Best ' + test_phase + ' loss: {:4f}'.format(best_loss))\n\n # load best model weights\n self.model.load_state_dict(best_model_weights)\n\n return self.model, epochs_losses\n\n def train(self):\n\n # Set an optimizer\n optimizer_ft = self.params['optimizer'](self.model.parameters(), 
lr=self.params['learning_rate'])\n\n        # Set learning rate decay scheduler\n        exp_lr_scheduler = lr_scheduler.StepLR(optimizer=optimizer_ft,\n                                               step_size=self.params['learning_rate_decay_frequency'],\n                                               gamma=self.params['learning_rate_decay_rate'])\n\n        # Run training cycle\n        model, losses = self.training_scheme(optimizer_ft, exp_lr_scheduler, self.data_loaders())\n\n        # Save model\n        final_weights_path = os.path.join(self.model_weights_save_dir, self.model_name + '.pt')\n        self.logger.info('Saving final weights: %s', final_weights_path)\n        torch.save(model.state_dict(), final_weights_path)\n\n        return model, losses\n\n\n\n\n","sub_path":"Train.py","file_name":"Train.py","file_ext":"py","file_size_in_byte":7752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"146921626","text":"#%%\nfrom gensim.models import Word2Vec\n\nimport pandas as pd\nimport numpy as np\n\nmodel_filename = 'gensim_W2Vmodel'\n\nfrom datetime import timedelta\n\n\n#%% \nsales = pd.read_pickle('preprocessing/sales_13_months_0.pkl')\nprint(sales.shape)\n\n#%%\n\n\nclients_list = list(sales['CardNo'].unique())\n# dates_test = sales[sales['CardNo'] == clients_list[45003]]\ndates_test = sales.head(1000)\ndates_test.columns = dates_test.columns.str.replace('SaleDate', 'date')\ndates_test.columns = dates_test.columns.str.replace('Article', 'product')\ndates_test.columns = dates_test.columns.str.replace('CardNo', 'client')\ndates_test.columns = dates_test.columns.str.replace('PositionTotal', 'sum')\n\n\n\n#%%\n\nfrom datetime import datetime\ndef join_dates(df: pd.DataFrame):\n    \"\"\"\n    Merge nearby dates into a single date.\n\n    :param df: Data table for a single client\n    :return: Data table with the collapsed date values\n    \"\"\"\n    df = df.sort_values(by=['date'], ascending=[True])\n    unique_dates = pd.DataFrame(df['date'].unique(), columns=['date'])\n\n    # Return the original table if there was only one date\n    if len(unique_dates) == 1:\n        return df\n    \n    unique_dates['new_date'] = unique_dates['date']\n    \n    dates_list = list(unique_dates['date'])\n    for i in range(0, len(dates_list)-1, 2):\n        if dates_list[i] == dates_list[i + 1] - timedelta(days=1):\n            unique_dates.iat[i, 1] = dates_list[i + 1]\n        elif dates_list[i] == dates_list[i + 1] - timedelta(days=2):\n            unique_dates.iat[i, 1] = dates_list[i + 1]\n    \n    new_dates_list = list(unique_dates['new_date'])\n    for i in range(len(new_dates_list)-1, 0, -1):\n        if new_dates_list[i] == new_dates_list[i - 1] + timedelta(days=1):\n            unique_dates.iat[i, 1] = new_dates_list[i - 1]\n\n    df = df.merge(unique_dates, on='date')\n\n    print(unique_dates)\n    print(len(list(unique_dates['date'].unique())))\n    print(len(list(unique_dates['new_date'].unique())))\n\n    df['date'] = df['new_date']\n    \n    df = df.groupby(['date', 'product', 'client']).sum().reset_index()\n    \n    return df\n\ntest_df = join_dates(dates_test)\n\n#%% Split\n\n\ndef split_client_df (df: pd.DataFrame):\n    if len(df) == 1:\n        return [df]\n    \n    df = df.sort_values(by=['date'], ascending=[True])\n    \n    # get the list of unique dates\n    dates_list = list(df['date'].unique())\n    \n    if len(dates_list) == 1:\n        return 'pass'\n    \n    new_list = []\n    \n    \n    for date in dates_list:\n        \n        new_df = df.copy()\n\n        \n        before_df = new_df[new_df['date'] <= date].tail(15)\n        after_df = new_df[new_df['date'] > date]\n        \n        if after_df.shape[0] == 0:\n            continue\n\n        \n        after_df_during_month = after_df[after_df['date'] <= str(int(date) + 100)].copy()\n        after_df_after_month = after_df[after_df['date'] > str(int(date) + 
100)].copy()\n        after_df_during_month['product'] = after_df_during_month['product'] + '+'\n        after_df_after_month['product'] = after_df_after_month['product'] + '++'\n\n        \n        new_df = pd.concat([before_df, after_df_during_month, after_df_after_month])\n        \n        new_list.append(new_df)\n\n\n    return new_list\n\n    \n#%%\n\nfrom multiprocessing.dummy import Pool as ThreadPool\n\n\ndef convert_to_sentences(data: pd.DataFrame,\n                         client_column,\n                         data_column,\n                         product_column,\n                         sales_column,\n                         min_sum):\n\n    sentences_list = []\n\n    new_df = pd.DataFrame()\n    new_df['date'] = data[data_column]\n    new_df['client'] = data[client_column]\n    new_df['product'] = data[product_column]\n    new_df['sum'] = data[sales_column]\n\n    new_df = new_df[new_df['sum'] >= min_sum]\n\n    clients_list = list(new_df['client'].unique())\n\n    print(len(clients_list))\n    \n    \n    pool = ThreadPool(8)\n    \n    df_list = []\n    \n    def get_df_list(client):\n\n        clients_df = new_df[new_df['client'] == client]\n        \n        clients_df = join_dates(clients_df)\n        \n        if clients_df.shape[0] == 1:\n            return 1\n        \n        clients_df = clients_df.sort_values(by=['date', 'sum'], ascending=[True, False])\n        \n        split = split_client_df(clients_df)\n        \n        \n        if split == 'pass':\n            return 1\n        \n        for s in split:\n            df_list.append(s)\n            if len(df_list) % 100 == 0:\n                print(len(df_list))\n\n    pool.map(get_df_list, clients_list)\n    \n    pool.close()\n    pool.join()\n    \n    print(len(df_list))\n    \n    \n    for s in df_list:\n        products_list = list(s['product'])\n        products_list_str = list(map(str, products_list))\n        sentences_list.append(products_list_str)\n    \n\n    return sentences_list\n\nstart = datetime.now()\ncodes_vect = convert_to_sentences(data=sales,\n                                  client_column='CardNo',\n                                  data_column='SaleDate',\n                                  product_column='Article',\n                                  sales_column='PositionTotal',\n                                  min_sum=600)\n\nprint(datetime.now() - start)\n\n#%%\nimport pickle\n\nobject_code_vec = codes_vect\nfile_code_vec = open('codes_vect.obj', 'wb')\npickle.dump(object_code_vec, file_code_vec)\n\n\n#%%\n\ndf_tempo = pd.DataFrame([{'a': 0}, {'a': 1}])\ndf_tempo.to_csv(r'C:\\Users\\petr_\\Dropbox\\Future\\file2.csv')\n\n#%%\nmin_count = 15 # must occur at least this many times\n#size = 150 # Dimensionality of the feature vectors\n\n# train word2vec on the sentences\nmodel = Word2Vec(codes_vect, workers=4, window=8, sg=0, min_count=min_count, size=130, alpha=0.005, iter=220, compute_loss=True)\nprint(model.get_latest_training_loss() // 1000000)\nmodel.save('w2v_33m_min1500rub_sg0_i220_window8_size130_during_after')\nmodel.save(r'C:\\Users\\petr_\\Dropbox\\Future\\w2v_33m_min500rub_sg0_i220_window8_size130_during_after')\n\n#%%\n\nmodel.save('w2v_mymodel_33_min40_sg0_i250_window7_size130_transSplit')\n\n#%%\n\ndef get_similar(lm_codes, topn):\n    most_similar = model.wv.most_similar(positive=lm_codes, topn=topn)\n    return most_similar\n\n\ndef predict_next_words(lm_codes, topn):\n    predict_output_word = model.predict_output_word(lm_codes, topn=topn)\n    return predict_output_word\n\n\nsimilar = ['15334425']\nmost_similar = get_similar(similar, topn=15)\nprint('Articles most similar to {}'.format(similar))\nfor ms in most_similar:\n    print(ms)\n\npredict = ['15334425']\nprint('After {} comes: '.format(predict))\npredict_output_word = predict_next_words(predict, topn=15)\nfor pow in predict_output_word:\n    print(pow)\n\nloss = 
model.get_latest_training_loss()\n","sub_path":"preprocessing/gensim_appr.py","file_name":"gensim_appr.py","file_ext":"py","file_size_in_byte":6908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"301473052","text":"# -*- coding: utf-8 -*-\n\"\"\"Integration tests for methods implemented on PullRequest.\"\"\"\n\nimport github3\n\nfrom .helper import IntegrationHelper\n\n\nclass TestPullRequest(IntegrationHelper):\n\n \"\"\"PullRequest integration tests.\"\"\"\n\n def get_pull_request(self, repository='sigmavirus24/github3.py', num=235):\n \"\"\"Get the pull request we wish to use in this test.\"\"\"\n owner, repo = repository.split('/')\n p = self.gh.pull_request(owner, repo, num)\n assert isinstance(p, github3.pulls.PullRequest)\n return p\n\n def test_create_review_comment(self):\n \"\"\"Show that a user can create an in-line reveiw comment on a PR.\"\"\"\n self.basic_login()\n cassette_name = self.cassette_name('create_review_comment')\n with self.recorder.use_cassette(cassette_name):\n p = self.get_pull_request(num=286)\n comment = p.create_review_comment(\n body='Testing review comments',\n commit_id='4437428aefdb50913e2acabd0552bd13021dc38f',\n path='github3/pulls.py',\n position=6\n )\n assert isinstance(comment, github3.pulls.ReviewComment)\n\n\nclass TestReviewComment(IntegrationHelper):\n\n \"\"\"Integration tests for the ReviewComment object.\"\"\"\n\n def test_reply(self):\n \"\"\"Show that a user can reply to an existing ReviewComment.\"\"\"\n self.basic_login()\n cassette_name = self.cassette_name('reply')\n with self.recorder.use_cassette(cassette_name):\n p = self.gh.pull_request('sigmavirus24', 'github3.py', 286)\n c = next(p.review_comments())\n comment = c.reply('Replying to comments is fun.')\n assert isinstance(comment, github3.pulls.ReviewComment)\n","sub_path":"tests/integration/test_pulls.py","file_name":"test_pulls.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"162708034","text":"import json\nfrom dataclasses import asdict\nfrom typing import Optional\n\nfrom google.cloud import bigquery\n\nfrom main_logic.common.common_const import USERS_STATES, INITIAL_STATE\nfrom main_logic.common.mappings import ACTIONS_TO_COMMAND\nfrom main_logic.google_cloud.clients import DatastoreClient\nfrom main_logic.state_handling.quest_states import State, QuestStateType, QuestState, Actions\nfrom main_logic.user_managment.users_crud import User\n\n\ndef get_user_state(user: User) -> Optional[State]:\n user_id = user.get_id()\n state_record = DatastoreClient().get_client().collection(USERS_STATES).document(user_id).get().to_dict()\n print(f'user_id: {user_id}, state_record: {state_record}')\n state = None\n if state_record:\n state = State(**state_record)\n print(f'parsed_state: {state}')\n return state\n\n\ndef init_user_state(user_id: str) -> bool:\n state_ref = DatastoreClient().get_client().collection(\n USERS_STATES).document(user_id)\n state_ref.set({u'state_type': INITIAL_STATE.name})\n\n\ndef update_user_state(user: User, new_state: QuestStateType) -> bool:\n try:\n user_id = user.get_id()\n state_ref = DatastoreClient().get_client().collection(USERS_STATES).document(user_id)\n state_ref.update({u'state_type': new_state.name})\n return True\n except Exception as e:\n print(f'Update of state failed for user {user} to state: {new_state}. 
'\n f'Exception: {e}')\n return False\n\n\ndef get_possible_commands(cur_state: QuestStateType):\n print(f'cur_state: {cur_state}. type {type(cur_state)}')\n q = QuestState()\n actions = q.machine.get_triggers(cur_state.name)\n actions_strings = set(map(lambda x: ACTIONS_TO_COMMAND.get(Actions[x]), actions))\n return actions_strings\n\n\ndef save_user_state(user: User, state: State):\n user_id = user.get_id()\n DatastoreClient().get_client().collection(\n USERS_STATES).document(user_id).set(asdict(state))\n","sub_path":"main_logic/state_handling/state_handler.py","file_name":"state_handler.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"533457069","text":"# *************************************** CHAPTER-3_EXERCISE-1 *************************************#\\\r\n\r\n# QUESTION : MAKE A VARIABLE LIKE winning_number and assign any number to it , ask user to guess a number , if user guessed correctly then print \"YOU WIN !!!!\" \r\n# if user did'nt guessed correctly then :\r\n# 1) if user guessed lower than actual number then print \"too low\"\r\n# 2) if user guessed higher than actual number then print \"too high\"\r\n\r\n# bonus : : : google \"how to generate random number using python\" to generate random number\r\n #winning number\r\n\r\n\r\n\r\n# ****************************************************** ANSWER ******************************************************* #\r\n\r\n\r\nwinning_number = 13\r\n\r\nguessed_number = int(input(\"Enter a number between 1 to 20 : \"))\r\n\r\n\r\n\r\nif winning_number == guessed_number:\r\n print(\"You win the game\")\r\n\r\nelse: # It is called as NESTED IF-ELSE (it contain if in else that's why)\r\n if guessed_number < winning_number:\r\n print(\"too low\")\r\n\r\n if guessed_number > winning_number: \r\n print(\"too high\")\r\n\r\n","sub_path":"PYTHON COURSE/30 - chapter-3_exercise-1(WINNING_GAME).py","file_name":"30 - chapter-3_exercise-1(WINNING_GAME).py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"258387187","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport filer.fields.image\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('filer', '0006_auto_20160623_1627'),\n ('cms', '0016_auto_20160608_1535'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Carousel',\n fields=[\n ('cmsplugin_ptr', models.OneToOneField(related_name='cmsplugin_carousel_ai_carousel', serialize=False, parent_link=True, primary_key=True, to='cms.CMSPlugin', auto_created=True)),\n ('name', models.CharField(verbose_name='name', max_length=160)),\n ('interval', models.FloatField(verbose_name='slide changing time in seconds', default=5.0)),\n ],\n options={\n 'abstract': False,\n },\n bases=('cms.cmsplugin',),\n ),\n migrations.CreateModel(\n name='Slide',\n fields=[\n ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),\n ('caption', models.CharField(blank=True, verbose_name='slide caption', max_length=160)),\n ('url', models.URLField(blank=True, verbose_name='link to URL', max_length=250)),\n ('ordering', models.IntegerField(db_index=True, verbose_name='ordering', default=0, help_text='Number which determines the order of slides in carousel. 
Smallest value first.')),\n ('carousel', models.ForeignKey(related_name='slides', verbose_name='carousel', to='cmsplugin_carousel_ai.Carousel')),\n ('image', filer.fields.image.FilerImageField(to='filer.Image', verbose_name='slide image')),\n ('linked_page', models.ForeignKey(null=True, verbose_name='link to page', help_text='Page link overrides given URL.', blank=True, to='cms.Page')),\n ],\n options={\n 'ordering': ('ordering',),\n },\n ),\n ]\n","sub_path":"cmsplugin_carousel_ai/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"310711562","text":"import base64\nimport os\nimport threading\n\nimport zmq\nimport time\nimport sys\n\n\n\nfrom Data.data import Data\n\n\nclass FileSystem:\n def __init__(self):\n pass\n def write(self,current_size,video,user_id,file_name):\n c = video.encode('ascii')\n c = base64.decodebytes(c)\n currentPath = \"./\"+str(Data.id)+\"/\"+\"[\" + str(user_id) + \"] \"+file_name+\".mp4\"\n mode = \"ab\"\n if (not os.path.exists(str(Data.id))):\n os.makedirs(str(Data.id))\n\n\n if(os.path.exists(currentPath)):\n size = os.path.getsize(currentPath)\n if size != current_size:\n print(str(size) + \" : \"+str(current_size))\n mode = \"wb\"\n\n\n\n with open(currentPath, mode) as binary_file:\n binary_file.write(c)\n\n","sub_path":"Data Keeper Tracker/Functions/FileSystem.py","file_name":"FileSystem.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"425871007","text":"from __future__ import division\nfrom coopr import pyomo\n\n# define an abstract life-cycle savings model\nmodel = pyomo.AbstractModel()\n\n##### Define model parameters #####\n\n# time horizon\nmodel.T = pyomo.Param(doc=\"time horizon\", within=pyomo.NonNegativeIntegers)\nmodel.periods = pyomo.RangeSet(0, model.T)\n\n# retirement age\nmodel.R = pyomo.Param(doc=\"retirement age\", within=pyomo.NonNegativeIntegers)\n\n# net interest rate\nmodel.r = pyomo.Param(doc='interest rate', within=pyomo.NonNegativeReals)\n\n# wages\nmodel.w0 = pyomo.Param(doc='initial real wage', within=pyomo.NonNegativeReals)\nmodel.g = pyomo.Param(doc='growth rate of real wages', within=pyomo.NonNegativeReals)\n\ndef wage_schedule(model, t):\n \"\"\"Defines the path of wages. 
This should really go in the .dat file!\"\"\"\n # extract parameters\n w0 = model.w0\n g = model.g\n \n if t < model.R:\n wage = (1 + g)**t * w0\n else:\n wage = 0.0\n return wage\n\nmodel.w = pyomo.Param(model.periods, doc='real wages', within=pyomo.NonNegativeReals,\n initialize=wage_schedule)\n\n# labor endowment\nmodel.l_bar = pyomo.Param(doc='labor endowment', within=pyomo.NonNegativeReals)\n\n# depreciation factor for physical capital\nmodel.delta = pyomo.Param(doc='depreciation factor', within=pyomo.NonNegativeReals)\n\n# define utilty parameters\nmodel.beta = pyomo.Param(doc='discount factor', within=pyomo.NonNegativeReals)\nmodel.sigma = pyomo.Param(doc='inverse of elasticity of substitution for consumption',\n within=pyomo.NonNegativeReals)\n\n# define borrowing constraint\nmodel.minimum_capital = pyomo.Param(doc='lower bound on capital holdings.')\n\n##### Define model variables #####\n\n# declare consumption variable\ndef initial_consumption(model, t):\n \"\"\"Rule for initial choice of consumption.\"\"\"\n return 0.5\n \nmodel.consumption = pyomo.Var(model.periods, \n name='consumption', \n doc=\"agent's consumption choice is a flow variable!\", \n domain=pyomo.PositiveReals, \n initialize=initial_consumption)\n\n# declare investment variable\ndef initial_investment(model, t):\n \"\"\"Rule for initial choice of consumption.\"\"\"\n return 0.5\n \nmodel.investment = pyomo.Var(model.periods, \n name='investment', \n doc=\"agent's investment choice is a flow variable!\", \n domain=pyomo.Reals, \n initialize=initial_investment)\n\n# declare capital variable\ndef initial_capital(model, t):\n \"\"\"\n Rule for initializing assets. Ideally this should be feasible given \n rules for initializing consumption variable.\n \n \"\"\"\n # extract variables\n i = model.investment\n k = model.capital\n \n # extract parameters\n delta = model.delta\n \n if t == 0:\n capital = 0.0\n else:\n capital = (1 - delta) * k[t-1] + i[t-1]\n \n return capital\n\nmodel.capital = pyomo.Var(pyomo.RangeSet(0, model.T+1), \n name='capital', \n doc='agent capital holdings are a stock variable!',\n initialize=initial_capital)\n\n##### define the objective function #####\n\ndef flow_utility(model, c):\n \"\"\"Flow utility function for the agent.\"\"\" \n # extract parameters\n sigma = model.sigma\n \n # agent likes to eat...\n utility_consumption = c**(1 - sigma) / (1 - sigma)\n \n return utility_consumption\n\ndef lifetime_utility(model):\n \"\"\"Abstract representation of our model objective.\"\"\" \n # extract variables\n c = model.consumption\n \n # extract parameters\n beta = model.beta\n T = model.periods\n \n # compute utility\n U = sum(beta**t * flow_utility(model, c[t]) for t in T)\n \n return U \n\nmodel.lifetime_utility = pyomo.Objective(rule=lifetime_utility, \n sense=pyomo.maximize)\n\n##### Define the model constraints #####\n\ndef flow_budget_constraints(model, t):\n \"\"\"Agent faces a sequence of flow budget constraints\"\"\"\n # extract variables\n k = model.capital\n c = model.consumption\n i = model.investment\n \n # extract parameters\n r = model.r\n w = model.w\n l_bar = model.l_bar\n \n return c[t] + i[t] == w[t] * l_bar + r * k[t]\n \nmodel.budget_constraints = pyomo.Constraint(model.periods, \n rule=flow_budget_constraints,\n doc='Agent faces a sequence of flow budget constraints.')\n\ndef capital_evolution_rule(model, t):\n \"\"\"Agent's capital stock evolves depending on current capital stock and investment rate.\"\"\"\n # extract variables\n k = model.capital\n i = model.investment\n 
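# editor's note (added): the equality returned below is the standard law of motion,\n    # k[t+1] = (1 - delta) * k[t] + i[t], i.e. undepreciated capital plus new investment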
\n    # extract parameters\n    delta = model.delta\n    \n    return k[t+1] == (1 - delta) * k[t] + i[t]\n    \nmodel.capital_evolution = pyomo.Constraint(model.periods, \n                                           rule=capital_evolution_rule,\n                                           doc='Equation of motion for capital stock.')\n\ndef borrowing_constraint(model, t):\n    \"\"\"Agent's capital cannot fall below some minimum amount.\"\"\"\n    return model.capital[t] >= model.minimum_capital\n\nmodel.borrowing_constraint = pyomo.Constraint(model.periods, \n                                              rule=borrowing_constraint,\n                                              doc='There is a lower bound on agent capital.')\n\ndef endowment(model):\n    \"\"\"Agent has some initial capital.\"\"\"\n    return model.capital[0] == 0.0\n\nmodel.endowment = pyomo.Constraint(rule=endowment, \n                                   doc='Agent has some initial endowment.')\n\ndef no_bequests(model):\n    \"\"\"Agent leaves no bequests.\"\"\"\n    return model.capital[model.T+1] == 0.0\n\nmodel.no_bequests = pyomo.Constraint(rule=no_bequests,\n                                     doc='Agent makes no bequests.')","sub_path":"optimization/basic_lifecycle2.py","file_name":"basic_lifecycle2.py","file_ext":"py","file_size_in_byte":5994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"443808086","text":"\n\n#class header\nclass _PULSAR():\n\tdef __init__(self,): \n\t\tself.name = \"PULSAR\"\n\t\tself.definitions = [u'a very small dense (= heavy in relation to its size) star that sends out radio waves']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_pulsar.py","file_name":"_pulsar.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"603443225","text":"import numpy as np\n\n\nfrom pusion.core.combiner import UtilityBasedCombiner\nfrom pusion.util.constants import *\n\n\nclass ComplementaryOutputCombiner(UtilityBasedCombiner):\n    \"\"\"\n    The :class:`ComplementaryOutputCombiner` combines fully complementary decision outputs by concatenating individual\n    decisions across classes for each sample.\n    \"\"\"\n\n    _SUPPORTED_PAC = [\n        (Problem.MULTI_CLASS, AssignmentType.CONTINUOUS, CoverageType.COMPLEMENTARY),\n        (Problem.MULTI_CLASS, AssignmentType.CRISP, CoverageType.COMPLEMENTARY),\n        (Problem.MULTI_LABEL, AssignmentType.CONTINUOUS, CoverageType.COMPLEMENTARY),\n        (Problem.MULTI_LABEL, AssignmentType.CRISP, CoverageType.COMPLEMENTARY)\n    ]\n\n    SHORT_NAME = 'COB'\n\n    def __init__(self):\n        UtilityBasedCombiner.__init__(self)\n\n    def combine(self, decision_outputs):\n        \"\"\"\n        Combine fully complementary decision outputs by concatenating individual decisions according to the coverage\n        of all classifiers. Due to the nature of complementary class coverage, no fusion between redundant class\n        assignments is required.\n\n        :param decision_outputs: `list` of `numpy.array` matrices, each of shape `(n_samples, n_classes')`,\n                where `n_classes'` is classifier-specific and described by the coverage. Each matrix corresponds to\n                one of `n_classifiers` classifiers and contains crisp or continuous decision outputs per sample.\n\n        :return: A matrix (`numpy.array`) of either crisp or continuous class assignments which represents fused\n                decisions obtained by concatenating the complementary outputs. Axis 0 represents samples and axis 1 the\n                class labels covered by all classifiers according to the coverage.\n        \"\"\"\n        n_classes = np.sum([len(ca) for ca in self.coverage])\n        # allocate the (n_samples, n_classes) result matrix\n        fused_decisions = np.zeros((len(decision_outputs[0]), n_classes))\n\n        for i, classifier_coverage in enumerate(self.coverage):\n            for ci in classifier_coverage:\n                # decision_outputs is a list of matrices, so index the list first\n                fused_decisions[:, ci] = decision_outputs[i][:, ci]\n        return fused_decisions\n","sub_path":"pusion/core/complementary_output_combiner.py","file_name":"complementary_output_combiner.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"491041715","text":"from django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom crm.models import Problem\nfrom django import forms\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import login, authenticate, logout\nfrom crm.models import *\n\n\n# Main page\ndef index(request):\n    if not request.user.is_authenticated:\n        return redirect('/login')\n    problems = Problem.objects.all().exclude(closed='on')\n    return render(request, 'index.html', {'problems': problems})\n\n\n# View the details of a complaint\ndef details(request):\n    if not request.user.is_authenticated:\n        return redirect('/login')\n\n    if request.method == 'GET':\n        id = request.GET.get('id')\n        problem = Problem.objects.get(pk=id)\n        return render(request, 'details.html', {'problem': problem})\n\n    if request.method == 'POST':\n        id = request.GET.get('id')\n        problem = Problem.objects.get(pk=id)\n        return redirect('/problem?id={}'.format(problem.id))\n\n\n# Add a complaint\ndef add(request):\n    if request.method == 'GET':\n        problems = Problem.objects.all()\n        return render(request, 'add.html', {'problems': problems})\n    if request.method == 'POST':\n        name = request.POST.get('name', '')\n        room = request.POST.get('room', '')\n        about = request.POST.get('about', '')\n        creator = request.user\n\n        if name == '':\n            messages.add_message(request, messages.ERROR, 'Please fill in all the fields!')\n            return redirect('/add')\n\n        problem = Problem()\n        problem.name = name\n        problem.room = room\n        problem.about = about\n        problem.creator = creator\n        like = Like()\n        like.save()\n        problem.likes = like\n        problem.save()\n\n        return redirect('/problem?id={}'.format(problem.id))\n\n\ndef logout_page(request):\n    logout(request)\n    return redirect('/login')\n\n\ndef login_page(request):\n    if request.method == 'GET':\n        return render(request, 'login.html')\n    if request.method == 'POST':\n        form = LoginValidation(request.POST)\n        if not form.is_valid():\n            return HttpResponse('Please enter valid data!')\n\n        username = request.POST['login']\n        password = request.POST['password']\n        user = authenticate(request, username=username, password=password)\n\n        if user is None:\n            return HttpResponse('Invalid credentials!')\n        login(request, user)\n        return redirect('/')\n\n\ndef register(request):\n    if request.method == 'GET':\n        return render(request, 'register.html')\n    if request.method == 'POST':\n        form = RegisterValidation(request.POST)\n        if not form.is_valid():\n            return HttpResponse('Please fill in all the fields!')\n\n        user = User()\n        user.username = request.POST.get('login')\n        user.email = request.POST.get('email')\n        user.set_password(request.POST.get('password'))\n        user.save()\n\n        login(request, user)\n\n        return redirect('/')\n\n\ndef myproblems(request):\n    if not request.user.is_authenticated:\n        return 
redirect('/login')\n\n    problems = Problem.objects.all().filter(creator=request.user)\n    return render(request, 'myproblems.html', {'problems': problems})\n\n\ndef edit(request):\n    if not request.user.is_authenticated:\n        return redirect('/login')\n    id = request.GET.get('id')\n    problem = Problem.objects.get(pk=id)\n    if request.method == 'GET':\n        return render(request, 'edit.html', {'problem': problem})\n\n    if request.method == 'POST':\n        helper = request.POST.get('helper')\n        closed = request.POST.get('closed')\n\n        problem.helper = helper\n        problem.closed = closed\n        problem.save()\n\n        return redirect('/problem?id={}'.format(problem.id))\n\n\ndef panel(request):\n    if not request.user.is_authenticated:\n        return redirect('/login')\n    if not request.user.is_superuser:\n        return HttpResponse('You do not have permission!')\n    problems = Problem.objects.all().exclude(closed='on')\n    problems1 = Problem.objects.all().filter(helper=request.user).exclude(closed='on')\n    return render(request, 'panel.html', {'problems': problems, 'problems1': problems1})\n\n\ndef archive(request):\n    if not request.user.is_authenticated:\n        return redirect('/login')\n\n    problems = Problem.objects.all().filter(closed='on')\n    return render(request, 'archive.html', {'problems': problems})\n\n\ndef makelike(request):\n    id = request.GET.get('id')\n    if request.user.is_authenticated:\n        user_tags = User.objects.filter(likedone=id)\n        current_user = request.user\n        if current_user not in user_tags:\n            try:\n                like = Like.objects.get(id=id)\n                like.thumbnumber += 1\n                like.likedone.add(current_user)\n                like.save()\n                return redirect('/problem?id={}'.format(id))\n            except ObjectDoesNotExist:\n                return redirect('/problem?id={}'.format(id))\n        else:\n            return redirect('/problem?id={}'.format(id))\n    else:\n        return redirect('/problem?id={}'.format(id))\n","sub_path":"Desktop/Complains/crm/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"141288109","text":"import sys\n\ndef main():\n    if(len(sys.argv) != 2):\n        print(\"Please provide a single key argument!\")\n        return 1\n    \n    userInput = input(\"plaintext: \")\n    \n    k = int(sys.argv[1])\n    \n    for letter in userInput:\n        letterNum = ord(letter)\n        if letterNum >= 65 and letterNum < 65 + 26:\n            print(chr(((letterNum - 65 + k) % 26) + 65), end = '')\n        elif letterNum >= 97 and letterNum < 97 + 26:\n            print(chr(((letterNum - 97 + k) % 26) + 97), end = '')\n        else:\n            print(letter, end = '')\n    print(\"\")\n    return 0\n    \nif __name__ == \"__main__\":\n    main()\n","sub_path":"week 8/caesar.py","file_name":"caesar.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"143235652","text":"import re\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport urllib.request\r\nimport shutil\r\n\r\ndef Satellite():\r\n    url = \"https://www.cwb.gov.tw/V7/js/s1p.js\"\r\n    request = urllib.request.Request(url)\r\n    response = urllib.request.urlopen(request)\r\n    html = response.read()\r\n    soup=BeautifulSoup(html,'lxml')\r\n    #print(soup.text[20:-3])\r\n\r\n    #pic=soup.text[20:-3].split(',')\r\n    pic=soup.text[11:].split(',')\r\n    #print(pic)\r\n\r\n    list_url=[]\r\n    list_time=[]\r\n    for pics in pic:\r\n    \r\n        picss=pics.strip('\r\"\n').split('\"')\r\n        list_url.append(picss[0])\r\n        list_time.append(picss[2])\r\n\r\n    pic_url=\"https://www.cwb.gov.tw\"+list_url[0]\r\n    return pic_url\r\n\r\n'''\r\nresponse = requests.get(pic_url, 
stream=True)\r\nwith open('Himawar.jpg', 'wb') as out_file:\r\n shutil.copyfileobj(response.raw, out_file)\r\ndel response\r\n\r\n'''\r\nif __name__ == '__main__':\r\n Satellite()\r\n \r\n\r\n","sub_path":"Satellite_pic_crawler.py","file_name":"Satellite_pic_crawler.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"133452767","text":"from PIL import Image, ImageChops\nimport numpy as np\nimport operator\nimport math\n\n#To find Right Most and Left Most Black point\n\ndef findExtrema(im,height,width):\n\tX_MIN = 999999\n\tX_MAX = -1000\n\tY_MIN = 999999\n\tY_MAX = -1000\n\n\tfor i in range(height):\n\t\tfor j in range(width):\n\t\t\tstride = (width*i) + j \n\t\t\tif im[stride] == 0:\n\t\t\t\tif X_MIN > j:\n\t\t\t\t\tX_MIN = j\n\t\t\t\tif X_MAX < j:\n\t\t\t\t\tX_MAX = j\n\n\t\t\t\tif Y_MIN > i:\n\t\t\t\t\tY_MIN = i\n\t\t\t\tif Y_MAX < i:\n\t\t\t\t\tY_MAX = i\n\n\treturn (X_MIN,X_MAX,Y_MIN,Y_MAX)\n\ndef XNOR(x,y):\n\tx_inv = -1\n\ty_inv = -1\n\n\tif x == 0:\n\t\tx_inv = 1\n\telse:\n\t\tx = 1\n\t\tx_inv = 0\n\n\tif y == 0:\n\t\ty_inv = 1\n\telse:\n\t\ty = 1\n\t\ty_inv = 0\n\t\n\tres = ((x_inv and y_inv) or (x and y))\n\tif res == 1:\n\t\treturn 255\n\n\treturn res\n\t\ndef rmsdiff_1997(im1, im2):\n\t#\"Calculate the root-mean-square difference between two images\"\n\n\th = ImageChops.difference(im1, im2).histogram()\n\n\tsummation = 0\n\tdenominator = float(im1.size[0]) * im1.size[1]\n\n # calculate rms\n\tfor i in range(256):\n\t\tsummation += h[i]*(i**2)\n\n\tquotient = summation/denominator\n\treturn math.sqrt(quotient)\n\ndef calculateDiff(img1,img2):\n\ts = 0\n\tm1 = np.array(img1).reshape(*img1.size)\n\tm2 = np.array(img2).reshape(*img2.size)\n\ts += np.sum(np.abs(m1-m2))\n\treturn s\n\ndef isEqual(im1, im2):\n\tim_diff = ImageChops.difference(im1, im2)\n\tdiff_array = np.asarray(im_diff)\n\treturn not np.nonzero(diff_array)\n\t#print(diff_array[np.nonzero(diff_array)])\n\ndef mainD12(problem):\n\tim1 = Image.open(problem.figures[\"G\"].visualFilename)\n\tim2 = Image.open(problem.figures[\"H\"].visualFilename)\n\t\n\tim1 = im1.convert(\"L\")\n\tim2 = im2.convert(\"L\")\n\t\n\tdata = im1.getdata()\n\tdata2 = list(im2.getdata())\n\theight, width = im1.size\n\t\n\tim3_new_arr = list(data)\n\t\n\ttotal_length = height * width\n\t\n\tthird_arr = [0] * total_length\n\tfor i in range(total_length):\n\t\tthird_arr[i] = XNOR(im3_new_arr[i], data2[i])\n\t\n\t#Remove Noise from the final Image \n\t\n\tim3_new = Image.new(im1.mode, im1.size, \"white\")\n\tim3_new.putdata(third_arr)\n\t\n\ti = 1\n\tminDiff=99999999\n\tanswer=0\n\t\n\twhile i < 9:\n\t\timage_name = problem.figures[str(i)].visualFilename\n\t\tnewImg = Image.open(image_name)\n\t\tnewImg = newImg.convert(\"L\")\n\t\tnewDiff = rmsdiff_1997(im3_new, newImg)\n\t\tprint(\"New Difference\")\n\t\tprint(newDiff)\n\t\tprint(\"Min Difference\")\n\t\tprint(minDiff)\n\t\tif(minDiff > newDiff):\n\t\t\tminDiff = newDiff\n\t\t\tanswer = i\n\t\t#newImg.show()\n\t\ti = i + 1\n\t\n\treturn(answer)\n","sub_path":"D_12.py","file_name":"D_12.py","file_ext":"py","file_size_in_byte":2367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"502647278","text":"from __future__ import with_statement\nfrom fabric.api import run, cd, env\nfrom fabric.context_managers import prefix\nimport os\n# import logging; logging.getLogger('paramiko.transport').addHandler(logging.StreamHandler())\n\nWEBAPPS_ROOT = 
'/home/categulario/webapps'\n\nenv.hosts = ['categulario.tk']\nenv.user = 'categulario'\n\ndef deploy(target='mateuv'):\n PROJECT_PATH = os.path.join(WEBAPPS_ROOT, '{}.scheman.tk'.format(target))\n\n with cd(PROJECT_PATH):\n run('git pull')\n\n with prefix('source env/bin/activate'):\n run('pip install -r requirements.txt')\n\n run('npm install')\n run('grunt')\n\n run('sudo systemctl restart gunicorn-scheman-{}'.format(target))\n","sub_path":"fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"539112710","text":"from common import *\ns=3*2**9\nk=2 # try 1, 2, 3...\nim=checkerboard(s,s//k)\nfor i in range(6):\n im[[0,-1],...]^=1\n im[...,[0,-1]]^=1\n s//=2\n ch=checkerboard((s,im.shape[1]),s//k)\n cv=checkerboard((im.shape[0]+2*s,s),s//k)\n im=np.hstack((1-cv,np.vstack((ch,im,1-ch)),cv))\nimshow(im)\nimsave(im,'p01.png')\n","sub_path":"p01.py","file_name":"p01.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"511683322","text":"import urllib2\nimport webbrowser\nimport json\n\nclass Movie():\n \"\"\" This class stores information related to Movies \"\"\"\n\n # Initialize the movie constructor with the defined parameters\n def __init__(self, movieTitle, moviePoster = \"\", movieTrailer = \"\"):\n self.title = movieTitle\n # If there is not moviePoster or movieTrailer supplied,\n # The program will search and return the closest Movie Info by using movieTitle\n if moviePoster == \"\" or movieTrailer == \"\":\n movieInfo = self.getMovieInfo()\n self.title = movieInfo[0]\n self.poster_image_url = movieInfo[1]\n self.trailer_youtube_url = movieInfo[2]\n else: \n self.poster_image_url = moviePoster\n self.trailer_youtube_url = movieTrailer\n\n\n # Method that initializes the trailer of a movie\n def init_trailer(self):\n webbrowser.open(self.trailer_youtube_url)\n\n\n # Get Movie Information from the The TMDb API and return an array with the info\n def getMovieInfo(self):\n # Format Movie Name \n formatedMovieName = self.title.replace(\" \", \"%20\")\n infoJson = self.getProcessedJson(\"https://api.themoviedb.org/3/search/movie?api_key=bb4016586720f201b8ac862369b47e87&language=en-US&query=\"+formatedMovieName+\"&page=1&include_adult=false\")\n\n # Check if Movie Was Found\n if len(infoJson['results']) > 0:\n # Grab Movie Info (ID + Title + Poster URL)\n movieID = infoJson['results'][0]['id']\n movieTitle = infoJson['results'][0]['title']\n\n # Check if poster image exists or not\n if infoJson['results'][0]['poster_path'] == None:\n moviePoster = \"https://upload.wikimedia.org/wikipedia/commons/f/fc/No_picture_available.png\"\n movieTrailer = \"https://www.youtube.com/watch?v=eq7Adzo4QAE\"\n else:\n moviePoster = \"http://image.tmdb.org/t/p/w185//\"+infoJson['results'][0]['poster_path']\n # Grab Movie Trailer\n videoJson = self.getProcessedJson(\"https://api.themoviedb.org/3/movie/\"+str(movieID)+\"/videos?api_key=bb4016586720f201b8ac862369b47e87&language=en-US\")\n movieTrailer = \"https://www.youtube.com/watch?v=\" + videoJson['results'][0]['key']\n\n else:\n movieTitle = \"Movie Not Found\"\n moviePoster = \"https://upload.wikimedia.org/wikipedia/commons/f/fc/No_picture_available.png\"\n movieTrailer = \"https://www.youtube.com/watch?v=eq7Adzo4QAE\"\n\n \n movieInfo = [movieTitle, moviePoster, movieTrailer]\n return movieInfo\n \n \n # Request + Process Json from an URL 
and Return it\n def getProcessedJson(self, url):\n connection = urllib2.Request(url)\n opener = urllib2.build_opener()\n fileJson = opener.open(connection)\n finalJson = json.loads(fileJson.read())\n return finalJson\n","sub_path":"media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":2943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"168657108","text":"import os\nimport types\nimport traceback\n\nfrom ..parameters import ORG, APP_CODE\nfrom ..fabric import Fabric\nfrom ..parameters import APIREF, TEMP_DIR\nfrom ..lib import readSets\nfrom ..core.helpers import console, setDir, mergeDict\nfrom .find import findAppConfig, findAppClass\nfrom .helpers import getText, dm, dh\nfrom .settings import setAppSpecs, setAppSpecsApi\nfrom .links import linksApi, outLink\nfrom .text import textApi\nfrom .sections import sectionsApi\nfrom .display import displayApi\nfrom .search import searchApi\nfrom .data import getModulesData\nfrom .repo import checkoutRepo\n\n\n# SET UP A TF API FOR AN APP\n\n\nFROM_TF_METHODS = \"\"\"\n banner\n silentOn\n silentOff\n isSilent\n setSilent\n info\n warning\n error\n indent\n\"\"\".strip().split()\n\n\nclass App:\n def __init__(\n self,\n cfg,\n appName,\n appPath,\n commit,\n release,\n local,\n _browse,\n hoist=False,\n version=None,\n checkout=\"\",\n mod=None,\n locations=None,\n modules=None,\n api=None,\n setFile=\"\",\n silent=False,\n **configOverrides,\n ):\n \"\"\"Set up the advanced TF API.\n\n Parameters\n ----------\n appName: string\n The appname can be as simple as the name of an existing TF-app.\n The app should exist as a repository `app-`*appName* under\n [github.com/annotation](https://github.com/annotation).\n\n If there is a `/` in the *appName argument*,\n it is interpreted as a location on your system.\n\n If it points to a directory with a *config.yaml* in it,\n this config file will be read and interpreted as settings\n for the advanced API.\n If there is also a *app.py*, it will be imported as custom application code.\n And if there is a *static/display.css* there, it will be used\n for styling the display of corpus material.\n\n If there is no `config.yaml` there, it will be assumed that there are\n `.tf` data files in that location, and they will be loaded.\n The advanced API will work with default settings,\n based on the `.tf` data found.\n\n !!! hint \"appName:specifier, checkout=specifier\"\n You may want to load downloadable features from the internet,\n or you want to experiment with features you are developing.\n The specifiers let you use a specific point in the\n history of the app and data.\n\n *appName:specifier* is used for retrieving a TF-app (*code*).\n\n *checkout=specifier* is for retrieving the corpus itself (*data*).\n\n * `''` (empty string or absent) (**default**):\n use local data if it is present under `~/text-fabric-data`,\n otherwise use the latest release if there are releases online,\n otherwise, use the latest commit.\n * `latest`: use the latest release.\n If there are commits after the commit that has been tagged\n with the latest release, these will **not** be used.\n * `hot`: use the latest commit, even if it comes after the\n latest commit of the latest release.\n * *release tag*, e.g. `v1.3`: use exactly this release.\n More precisely, this is the commit that has been tagged\n with that release tag.\n * *commit hash*, e.g. 
`2d0ca1f593805af0c13c4a62ed7405b94d870045`:\n use exactly this commit.\n * `local`: use local data from your `~/text-fabric-data` directory\n if it is present, otherwise fail.\n * `clone`: use local data from your `~/github` directory\n if it is present, otherwise fail.\n\n For a demo, see\n [banks/repo](https://nbviewer.jupyter.org/github/annotation/tutorials/blob/master/banks/repo.ipynb)\n\n hoist: dict, optional `False`\n If you pass `globals()`, the core API elements are made directly available\n as global names in your script or notebook:\n\n * `tf.core.nodefeature.NodeFeature` as `F` instead of `A.api.F`\n * `tf.core.locality.Locality` as `L` instead of `A.api.L`\n * `tf.core.text.Text` as `T` instead of `A.api.T`\n * and a few others (listed after executing the incantation)\n\n version: string, optional `None`\n If you do not want to work with the default version of your main corpus,\n you can specify a different version here.\n\n !!! caution \"Modules\"\n If you also ask for extra data modules by means of the `mod` argument,\n then the corresponding version of those modules will be chosen.\n Every properly designed data module must refer to a specific\n version of the main source!\n\n mod: string, optional `None`\n A comma-separated list of modules in one of the forms\n\n {org}/{repo}/{path}`\n\n or\n\n {org}/{repo}/{path}:specifier`\n\n All features of all those modules will be loaded.\n If they are not yet present, they will be downloaded from GitHub first.\n\n For example, there is an easter egg module on GitHub,\n and you can obtain it by\n\n mod='etcbc/lingo/easter/tf'`\n\n Here the `{org}` is `etcbc`, the `{repo}` is `lingo`,\n and the `{path}` is `easter/tf` under which\n version `c` of the feature `egg`\n is available in TF format.\n\n You can point to any such directory om the entire GitHub\n if you know that it contains relevant features.\n\n The specifier is as in `appName:specifier` and `checkData=specifier`.\n It is used to get data from a different point in the history.\n\n Your TF app might be configured to download specific modules.\n See `moduleSpecs` in the app's `config.yaml` file.\n\n !!! caution \"Let TF manage your text-fabric-data directory\"\n It is better not to fiddle with your `~/text-fabric-data` directory\n manually. Let it be filled with auto-downloaded data.\n You can then delete data sources and modules when needed,\n and have them redownloaded at your wish,\n without any hassle or data loss.\n\n locations, modules: string, optional `None`\n If you want to add other search locations for TF features manually,\n you can pass optional `locations` and `modules` parameters,\n which will be passed to the `tf.fabric.Fabric` call to the core of TF.\n\n !!! note \"More, not less\"\n Using these arguments will load features on top of the\n default selection of features.\n You cannot use these arguments to prevent features from being loaded.\n\n !!! 
note \"appName with `/`\"\n If you use the *appName* argument with a `/` in it,\n and it does not point to a TF app you have locally,\n it will be interpreted as a *locations* search path to find `.tf` files.\n It acts as the main `locations` argument,\n and will be combined with the `modules` argument.\n\n api: object, optional, `None`\n So far, the TF app will construct an advanced API\n with a more or less standard set of features\n loaded, and make that API avaible to you, under `A.api`.\n\n But you can also setup a core API yourself by using\n `tf.fabric.Fabric` with your choice of locations and modules:\n\n from tf.fabric import Fabric`\n TF = Fabric(locations=..., modules=...)`\n api = TF.load(features)`\n\n Here you have full control over what you load and what not.\n\n If you want the extra power of the TF app, you can wrap this `api`:\n\n A = use('xxxx', api=api)`\n\n !!! hint \"Unloaded features\"\n Some apps do not load all available features of the corpus by default.\n\n This happens when a corpus contains quite a number of features\n that most people never need.\n Loading them cost time and takes a lot of RAM.\n\n In the case where you need an available feature\n that has not been loaded, you can load it by demanding\n\n TF.load('feature1 feature2', add=True)`\n\n provided you have used the `hoist=globals()` parameter earlier.\n If not, you have to say\n\n A.api.TF.load('feature1 feature2', add=True)`\n\n setFile: string, optional, `None`\n The name of a file that contains condensed set information,\n produces with `tf.lib.writeSets`.\n These sets will be read and will become usable in TF queries.\n\n silent: boolean, optional `False`\n If `True`, nearly all output of this call will be suppressed,\n including the links to the loaded\n data, features, and the API methods.\n Error messages will still come through.\n\n configOverrides: key value pairs\n All values here will be used to override configuration settings\n that are specified in the app's `config.yaml` file.\n The list of those settings is spelled out in\n `tf.advanced.settings`.\n\n See Also\n --------\n tf.about.corpora: list of corpora with an official TF app\n tf.advanced.settings: description of what can go in a `config.yaml`\n \"\"\"\n\n self.context = None\n \"\"\"Result of interpreting all configuration options in `config.yaml`.\n\n See Also\n --------\n tf.advanced.settings.showContext\n \"\"\"\n\n mergeDict(cfg, configOverrides)\n\n for (key, value) in dict(\n isCompatible=cfg.get(\"isCompatible\", None),\n appName=appName,\n api=api,\n version=version,\n silent=silent,\n _browse=_browse,\n ).items():\n setattr(self, key, value)\n\n setattr(self, \"dm\", dm)\n setattr(self, \"dh\", dh)\n\n setAppSpecs(self, cfg)\n aContext = self.context\n version = aContext.version\n\n setDir(self)\n\n if not self.api:\n self.sets = None\n if setFile:\n sets = readSets(setFile)\n if sets:\n self.sets = sets\n console(f'Sets from {setFile}: {\", \".join(sets)}')\n specs = getModulesData(\n self, mod, locations, modules, version, checkout, silent\n )\n if specs:\n (locations, modules) = specs\n self.tempDir = f\"{self.repoLocation}/{TEMP_DIR}\"\n TF = Fabric(locations=locations, modules=modules, silent=silent or True)\n api = TF.load(\"\", silent=silent or True)\n if api:\n self.api = api\n excludedFeatures = aContext.excludedFeatures\n allFeatures = TF.explore(silent=silent or True, show=True)\n loadableFeatures = allFeatures[\"nodes\"] + allFeatures[\"edges\"]\n useFeatures = [\n f for f in loadableFeatures if f not in 
excludedFeatures\n ]\n result = TF.load(useFeatures, add=True, silent=silent or True)\n if result is False:\n self.api = None\n else:\n self.api = None\n\n if self.api:\n self.TF = self.api.TF\n for m in FROM_TF_METHODS:\n setattr(self, m, getattr(self.TF, m))\n self.getText = types.MethodType(getText, self)\n linksApi(self, silent)\n searchApi(self)\n sectionsApi(self)\n setAppSpecsApi(self, cfg)\n displayApi(self, silent)\n textApi(self)\n if hoist:\n # docs = self.api.makeAvailableIn(hoist)\n self.api.makeAvailableIn(hoist)\n if not silent:\n dh(\n \"
Text-Fabric API: names \"\n + outLink(\"N F E L T S C TF\", APIREF, title=\"doc\",)\n + \" directly usable
\"\n )\n\n silentOff = self.silentOff\n silentOff()\n else:\n if not _browse:\n console(\n f\"\"\"\nThere were problems with loading data.\nThe Text-Fabric API has not been loaded!\nThe app \"{appName}\" will not work!\n\"\"\",\n error=True,\n )\n\n def reinit(self):\n \"\"\"TF-Apps may override this method.\n It is called by `reuse`. Hence it needs to be present.\n \"\"\"\n\n pass\n\n def reuse(self, hoist=False):\n \"\"\"Re-initialize the app.\n\n The app's settings are read again, the app's code is re-imported,\n the app's stylesheets are applied again.\n But the data is left untouched, and no time-consuming reloading of data\n takes place.\n\n Handy when you are developing a new app and want to experiment with it\n without the costly re-loading of the data in every cycle.\n\n Parameters\n ----------\n hoist: boolean, optional `False`\n Same as in `App`.\n\n !!! hint \"the effect of the config settings\"\n If you are developing a TF app and need to see the effects of\n the configuration settings in detail, you can conveniently\n call `reuse` and `tf.advanced.settings.showContext` in tandem.\n \"\"\"\n\n aContext = self.context\n appPath = aContext.appPath\n appName = aContext.appName\n local = aContext.local\n commit = aContext.commit\n release = aContext.release\n version = aContext.version\n api = self.api\n\n cfg = findAppConfig(appName, appPath, commit, release, local, version=version)\n findAppClass(appName, appPath)\n\n setAppSpecs(self, cfg, reset=True)\n\n if api:\n TF = self.TF\n TF._makeApi()\n api = TF.api\n self.api = api\n self.reinit() # may be used by custom TF apps\n linksApi(self, True)\n searchApi(self)\n sectionsApi(self)\n setAppSpecsApi(self, cfg)\n displayApi(self, True)\n textApi(self)\n if hoist:\n api.makeAvailableIn(hoist)\n\n\ndef findApp(appName, checkoutApp, _browse, *args, silent=False, version=None, **kwargs):\n \"\"\"Find a TF app by name and initialize an object of its main class.\n\n Parameters\n ----------\n appName: string\n Either:\n\n * the plain name of an official TF app (e.g. `bhsa`, `oldbabylonian`)\n * or a local directory, containing at least one `/`:\n * if it points to a directory under which an unofficial app sits:\n that app will be loaded\n * else it is assumed that the local directory is a TF data directory:\n a vanilla app without extra configuration is initialized\n and this local directory is supplied for its `locations`\n parameter. This has the effect that the TF features here will\n be loaded.\n\n checkoutApp: string\n The checkout specifier for the app code. 
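For example, `checkoutApp='clone'` uses your local `~/github` clone of the app code instead of downloading it. 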
See `tf.advanced.app.App`.\n\n args: mixed\n Arguments that will be passed to the initializer of the `tf.advanced.app.App`\n class.\n\n kwargs: mixed\n Keyword arguments that will be passed to the initializer of the\n `tf.advanced.app.App` class.\n\n \"\"\"\n\n if not appName or \"/\" in appName:\n appPath = os.path.expanduser(appName) if appName else \"\"\n absPath = os.path.abspath(appPath)\n (commit, release, local) = (None, None, None)\n\n if os.path.isdir(absPath):\n (appDir, appName) = os.path.split(absPath)\n codePath = f\"{absPath}/{APP_CODE}\"\n if os.path.isdir(codePath):\n appDir = codePath\n appBase = \"\"\n else:\n console(f\"{absPath} is not an existing directory\", error=True)\n appBase = False\n appPath = appDir\n else:\n (commit, release, local, appBase, appDir) = checkoutRepo(\n _browse=_browse,\n org=ORG,\n repo=f\"app-{appName}\",\n folder=APP_CODE,\n checkout=checkoutApp,\n withPaths=True,\n keep=False,\n silent=silent,\n label=\"TF-app\",\n )\n appBaseRep = f\"{appBase}/\" if appBase else \"\"\n appPath = f\"{appBaseRep}{appDir}\"\n cfg = findAppConfig(appName, appPath, commit, release, local, version=version)\n version = cfg[\"provenanceSpec\"].get(\"version\", None)\n if not appBase and appBase != \"\":\n return None\n\n isCompatible = cfg[\"isCompatible\"]\n if isCompatible is None:\n appClass = App\n elif not isCompatible:\n return None\n else:\n appBaseRep = f\"{appBase}/\" if appBase else \"\"\n appPath = f\"{appBaseRep}{appDir}\"\n\n appClass = findAppClass(appName, appPath) or App\n try:\n app = appClass(\n cfg,\n appName,\n appPath,\n commit,\n release,\n local,\n _browse,\n *args,\n version=version,\n silent=silent,\n **kwargs,\n )\n except Exception as e:\n if appClass is not App:\n console(\n f\"There was an error loading TF-app {appName} from {appPath}\", error=True\n )\n console(repr(e), error=True)\n traceback.print_exc()\n console(\"Text-Fabric is not loaded\", error=True)\n return None\n return app\n","sub_path":"tf/advanced/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":18163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"218363814","text":"from pelican import signals, utils\nfrom collections import namedtuple, defaultdict, OrderedDict\nimport os\nimport logging\n\n\ndef getoldurl(generator):\n oldurls = []\n for article in generator.articles: # Loop through articles\n # If the article has the oldurl metadata\n if 'oldurl' in article.metadata.keys():\n if article.metadata['oldurl'] is not '':\n oldurls.append(\n (article.metadata['oldurl'], generator.settings[\"SITEURL\"] + \"/\" + article.url))\n generator.context['oldurls'] = oldurls\n\n\ndef generatehtaccess(generator, writer):\n oldurls = generator.context['oldurls']\n template = generator.get_template('htaccess')\n filename = '.htaccess'\n writer.write_file(filename, template, generator.context, oldurls=oldurls)\n\n\ndef register():\n # Registers the various functions to run during particar Pelican processes\n # Run after the article list has been generated\n signals.article_generator_finalized.connect(getoldurl)\n # Run after the articles have been written\n signals.article_writer_finalized.connect(generatehtaccess)\n","sub_path":"plugins/oldurl/oldurl.py","file_name":"oldurl.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"36007326","text":"\n\nfrom xai.brain.wordbase.verbs._decimate import _DECIMATE\n\n#calss 
header\nclass _DECIMATED(_DECIMATE):\n\tdef __init__(self):\n\t\t_DECIMATE.__init__(self)\n\t\tself.name = "DECIMATED"\n\t\tself.specie = 'verbs'\n\t\tself.basic = "decimate"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_decimated.py","file_name":"_decimated.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"318355005","text":"\"\"\"\nConverts the raw CSV form to a Parquet form with just the columns we want\n\"\"\"\n\nimport mlflow\nimport pyspark\n\ndef etl_data():\n\n with mlflow.start_run() as mlrun:\n # define DBFS path for csv input - persistent\n ratings_csv = '/mlflow/ricardo/multistep/csv/ratings.csv'\n\n # define the DBFS path which we will write parquet to\n ratings_parquet_dir = '/mlflow/ricardo/multistep/parquet/ratings'\n \n spark = pyspark.sql.SparkSession.builder.getOrCreate()\n print('spark context -> ', spark.sparkContext.master) \n print(\"Converting ratings CSV %s to Parquet %s\" % (ratings_csv, ratings_parquet_dir))\n ratings_df = spark.read \\\n .option(\"header\", \"true\") \\\n .option(\"inferSchema\", \"true\") \\\n .csv(ratings_csv) \\\n .drop(\"timestamp\") # Drop unused column\n ratings_df.show()\n ratings_df.write.mode('overwrite').parquet(ratings_parquet_dir)\n\n print(\"Uploading Parquet ratings: %s\" % ratings_parquet_dir)\n mlflow.log_artifacts(ratings_parquet_dir, \"ratings-parquet-dir\")\n\n\nif __name__ == '__main__':\n etl_data()\n","sub_path":"examples/multistep_dbfs/etl_data.py","file_name":"etl_data.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"616853192","text":"# 791. Custom Sort String\n# Daily problem.\n\n# 2021/07/14\n# Runtime: 28 ms, faster than 85.88% of Python3 online submissions for Custom Sort String.\n# Memory Usage: 14.2 MB, less than 52.64% of Python3 online submissions for Custom Sort String.\n\n# O(n log n) sorting solution. The official editorial also has a very elegant O(n) counting solution.\n# If a character appears in order, record its position as its sort key; otherwise assign it 26, i.e. it sorts last.\n\nclass Solution:\n def customSortString(self, order: str, str: str) -> str:\n key = {}\n for i, c in enumerate(order):\n key[c] = i\n for c in str:\n if c not in key:\n key[c] = 26\n return ''.join(sorted(str, key=lambda x: key[x]))\n","sub_path":"0791. Custom Sort String.py","file_name":"0791. 
Custom Sort String.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"453425863","text":"# encoding=utf-8\n\nimport requests\nimport datetime\nimport lxml\nimport os\nimport win32api\nimport win32gui\nimport win32con\nfrom bs4 import BeautifulSoup\n\n\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'\n}\nindex_url = \"https://bing.ioliu.cn\"\nindex_html = ''\nimage_html = ''\n\n\nrequest = requests.get(url=index_url, headers=headers)\nif request.status_code == 200:\n request.encoding = 'utf-8'\n index_html = request.text\n\nindex_soup = BeautifulSoup(index_html, 'lxml')\nitem_html = index_soup.find(class_='item')\n\nitem_soup = BeautifulSoup(str(item_html), 'lxml')\nitem_src = item_soup.find('a')\n\nimage_url = index_url + item_src['href']\nimage_request = requests.get(url=image_url, headers=headers)\nif image_request.status_code == 200:\n image_request.encoding = 'utf-8'\n image_html = image_request.text\n\nimage_soup = BeautifulSoup(image_html, 'lxml')\npic_src = image_soup.find(class_='target progressive__img progressive--not-loaded')\npic_src = pic_src['data-progressive']\n\ndown_request = requests.get(url=pic_src, headers=headers)\ndata = down_request.content\ntoday = datetime.date.today()\nfile = open(str(today)+'.jpg', 'wb')\nfile.write(data)\nfile.close()\n\n\nimage_path = os.getcwd() + '/' + str(today) + '.jpg'\n\nreg_key = win32api.RegOpenKeyEx(win32con.HKEY_CURRENT_USER, \"Control Panel\\\\Desktop\", 0, win32con.KEY_SET_VALUE)\n# Last argument: 2 = stretch, 0 = center, 6 = fit, 10 = fill, 0 = tile\nwin32api.RegSetValueEx(reg_key, \"WallpaperStyle\", 0, win32con.REG_SZ, \"2\")\n# Last argument: 1 means tile; stretch, center, etc. are all 0\nwin32api.RegSetValueEx(reg_key, \"TileWallpaper\", 0, win32con.REG_SZ, \"0\")\n# Refresh the desktop\nwin32gui.SystemParametersInfo(win32con.SPI_SETDESKWALLPAPER, image_path, win32con.SPIF_SENDWININICHANGE)\n","sub_path":"BingWallpaper/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"619756219","text":"import shell\nimport util\nimport wordsegUtil\n\n############################################################\n# Problem 1b: Solve the segmentation problem under a unigram model\n\nclass SegmentationProblem(util.SearchProblem):\n def __init__(self, query, unigramCost):\n self.query = query\n self.unigramCost = unigramCost\n\n def startState(self):\n # BEGIN_YOUR_CODE (around 1 line of code expected)\n # Start state is the full, unsegmented input string\n remaining_string = self.query\n return remaining_string\n # END_YOUR_CODE\n\n def isGoal(self, state):\n # BEGIN_YOUR_CODE (around 2 lines of code expected)\n # Goal state is when there is no more remaining string\n return state == ''\n # END_YOUR_CODE\n\n def succAndCost(self, state):\n # BEGIN_YOUR_CODE (around 10 lines of code expected)\n result = []\n remaining_string = state\n for i in range(1,len(remaining_string)+1):\n action = remaining_string[:i]\n cost = self.unigramCost(action)\n new_remaining_string = remaining_string[i:]\n result.append((action, new_remaining_string, cost))\n return result\n # END_YOUR_CODE\n\ndef segmentWords(query, unigramCost):\n if len(query) == 0:\n return ''\n\n ucs = util.UniformCostSearch(verbose=0)\n ucs.solve(SegmentationProblem(query, unigramCost))\n\n # BEGIN_YOUR_CODE (around 3 lines of code expected)\n return ' 
'.join(ucs.actions)\n # END_YOUR_CODE\n\n'''\nclass SegmentationProblem(util.SearchProblem):\n def __init__(self, query, unigramCost):\n self.query = query\n self.unigramCost = unigramCost\n\n def startState(self):\n # BEGIN_YOUR_CODE (around 1 line of code expected)\n # Start state has previous cost of 0 and the full input string\n remaining_string = self.query\n prev_cost = 0\n return (prev_cost, remaining_string)\n # END_YOUR_CODE\n\n def isGoal(self, state):\n # BEGIN_YOUR_CODE (around 2 lines of code expected)\n # Goal state is when there is no more remaining string\n return state[1] == ''\n # END_YOUR_CODE\n\n def succAndCost(self, state):\n # BEGIN_YOUR_CODE (around 10 lines of code expected)\n result = []\n prev_cost, remaining_string = state\n for i in range(1,len(remaining_string)+1):\n action = remaining_string[:i]\n cost = self.unigramCost(action)\n new_prev_cost = prev_cost + cost\n new_remaining_string = remaining_string[i:]\n new_state = (new_prev_cost, new_remaining_string)\n result.append((action, new_state, cost))\n return result\n # END_YOUR_CODE\n\ndef segmentWords(query, unigramCost):\n if len(query) == 0:\n return ''\n\n ucs = util.UniformCostSearch(verbose=0)\n ucs.solve(SegmentationProblem(query, unigramCost))\n\n # BEGIN_YOUR_CODE (around 3 lines of code expected)\n return ' '.join(ucs.actions)\n # END_YOUR_CODE\n'''\n\n\n############################################################\n# Problem 2b: Solve the vowel insertion problem under a bigram cost\n\nclass VowelInsertionProblem(util.SearchProblem):\n def __init__(self, queryWords, bigramCost, possibleFills):\n self.queryWords = queryWords\n self.bigramCost = bigramCost\n self.possibleFills = possibleFills\n\n def startState(self):\n # BEGIN_YOUR_CODE (around 1 line of code expected)\n previous_word = wordsegUtil.SENTENCE_BEGIN\n next_index = 0\n return (previous_word, next_index)\n # END_YOUR_CODE\n\n def isGoal(self, state):\n # BEGIN_YOUR_CODE (around 2 lines of code expected)\n return state[1] == (len(self.queryWords))\n # END_YOUR_CODE\n\n def succAndCost(self, state):\n # BEGIN_YOUR_CODE (around 10 lines of code expected)\n result = []\n previous_word, next_index = state\n # Make a copy of the remaining list of vowel-free words\n actions = list(self.possibleFills(self.queryWords[next_index]))\n if not actions:\n actions = [self.queryWords[next_index]]\n for action in actions:\n cost = self.bigramCost(previous_word, action)\n new_previous_word = action\n new_index = next_index + 1\n new_state = (new_previous_word, new_index)\n result.append((action, new_state, cost))\n return result\n # END_YOUR_CODE\n\ndef insertVowels(queryWords, bigramCost, possibleFills):\n # BEGIN_YOUR_CODE (around 3 lines of code expected)\n ucs = util.UniformCostSearch(verbose=0)\n ucs.solve(VowelInsertionProblem(queryWords, bigramCost, possibleFills))\n return ' '.join(ucs.actions)\n # END_YOUR_CODE\n\n############################################################\n# Problem 3b: Solve the joint segmentation-and-insertion problem\n\nclass JointSegmentationInsertionProblem(util.SearchProblem):\n def __init__(self, query, bigramCost, possibleFills):\n self.query = query\n self.bigramCost = bigramCost\n self.possibleFills = possibleFills\n\n def startState(self):\n # BEGIN_YOUR_CODE (around 2 lines of code expected)\n previous_word = wordsegUtil.SENTENCE_BEGIN\n remaining_string = self.query\n return (previous_word, remaining_string)\n # END_YOUR_CODE\n\n def isGoal(self, state):\n # BEGIN_YOUR_CODE (around 2 lines of code expected)\n 
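# goal: the entire vowel-free query string has been consumed\n 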
return state[1] == ''\n # END_YOUR_CODE\n\n def succAndCost(self, state):\n # BEGIN_YOUR_CODE (around 15 lines of code expected)\n result = []\n previous_word, remaining_string = state\n # Iterate through all possible actions\n for i in range(1, len(remaining_string) + 1):\n vowel_free = remaining_string[:i]\n new_remaining = remaining_string[i:]\n possible_words = self.possibleFills(vowel_free)\n if possible_words:\n for word in possible_words:\n cost = self.bigramCost(previous_word, word)\n new_state = (word, new_remaining)\n result.append((word, new_state, cost))\n return result\n # END_YOUR_CODE\n\ndef segmentAndInsert(query, bigramCost, possibleFills):\n if len(query) == 0:\n return ''\n\n # BEGIN_YOUR_CODE (around 5 lines of code expected)\n ucs = util.UniformCostSearch(verbose=0)\n ucs.solve(JointSegmentationInsertionProblem(query, bigramCost, possibleFills))\n return ' '.join(ucs.actions)\n # END_YOUR_CODE\n\n############################################################\n\nif __name__ == '__main__':\n shell.main()\n","sub_path":"reconstruct/submission.py","file_name":"submission.py","file_ext":"py","file_size_in_byte":6676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"376712382","text":"items = {\r\n 0xF444F: (0xCCCC0101, '萨罗尼亚大图书馆'),\r\n 0xA060A5: (0xCCCC0101, '欧文之书1\\n“经过长年研究之后,终于告以完成。'),\r\n 0xA060A6: (0xCCCC0101, '启用此装置,\\n无论多么巨大的物体皆可被控制。'),\r\n 0xA060A7: (0xCCCC0101, '举例而言,就连我们所居住的这颗星球,\\n在理论上也是可以被控制的。”'),\r\n 0xA06109: (0xCCCC0101, '欧文之书2\\n “现在所进行的,是关于冷冻方面的研究。'),\r\n 0xA0610A: (0xCCCC0101, '通过完全冷冻物质,\\n可以将其从时间的流逝中分割开来……\\n就连人类也不例外。”'),\r\n 0xA0616D: (0xCCCC0101, '古代之书1\\n“四大神像\\n会将通过其间的人焚烧成灰。'),\r\n 0xA0616E: (0xCCCC0101, '开拓道路的钥匙是四颗利牙。\\n只有我们才可以自由通行。”'),\r\n 0xA061D1: (0xCCCC0101, '古代之书2\\n “光与暗,是由风、火、水、土\\n四个属性的水晶来维持平衡的。”'),\r\n 0xA06235: (0xCCCC0101, '古代之书3\\n “难道是因为我们过度使用了光之力吗?'),\r\n 0xA06236: (0xCCCC0101, '我们现在已经无法阻止开始失控的光了,\\n这个世界会就这么消亡吗……”'),\r\n 0xA06299: (0xCCCC0101, '古代之书4\\n “从暗之世界出现的四战士,\\n阻止了光的失控。'),\r\n 0xA0629A: (0xCCCC0101, '他们是谁?\\n到底从何而来?\\n但是多亏了他们,世界应该得救了……”'),\r\n 0xA062FD: (0xCCCC0101, '暗黑剑之书\\n “以暗黑剑为目标之人,请前往隐藏在\\n萨罗尼亚西方山脉中的法尔加巴德!”'),\r\n 0xA06361: (0xCCCC0101, '难懂的书\\n“时之齿轮的原理……所谓永动机……\\n将那反物质……在均衡状态……”'),\r\n 0xA063C5: (0xCCCC0101, '飞空艇之书\\n “年度最佳飞空艇'),\r\n 0xA063C6: (0xCCCC0101, '1:潜水艇 鹦鹉螺号\\n2:巨型船 无敌号\\n3:飞空艇 企业号”'),\r\n 0xA06429: (0xCCCC0101, '欧文之书3\\n“一切已准备就绪,\\n我将会离开萨罗尼亚前往实验基地。'),\r\n 0xA0642A: (0xCCCC0101, '对了!我将带上我的儿子,\\n同时也是我优秀的弟子德修一同前往。”'),\r\n}","sub_path":"Source/Hooks/FinalFantasyIII/Tools/msd/t22_04.py","file_name":"t22_04.py","file_ext":"py","file_size_in_byte":2236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"259954351","text":"from flask import Flask, render_template, request, redirect, url_for, flash\r\nfrom flask import send_file, abort\r\nimport requests\r\nimport sqlite3\r\nfrom model import User, Profile, db, Tables, Employee, Sales\r\nfrom forms import RegisterForm, LoginForm, UploadForm, ProfileForm, TableForm, EmployeeForm, SalesForm\r\nfrom sqlalchemy import func #调用sqlalchemy模块中自带的函数\r\nimport pandas as pd\r\nfrom werkzeug.utils import secure_filename #获取文件名\r\nimport os #用于查看文件路径\r\nimport sys #指明所有查找module,package的路径\r\nimport unicodedata #是字符数据库,且为所有Unicode字符定义字符属性\r\nimport uuid #给字符产生唯一编码\r\n\r\napp = Flask(__name__)\r\napp.config['SECRET_KEY'] = 'mysecretkey'\r\n # pylint: disable=no-member\r\napp.config['UPLOAD_FOLDER'] = './static/uploads'\r\n#这个save 
file的文件夹一定是不能跟app.py平级,要在子文件夹内\r\n#app.config['UPLOAD_PATH'] = os.path.join(app.root_path, 'uploads')\r\nDATABASE = 'ssc.db'\r\n\r\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///mydb.db'\r\n#上边这行是数据库链接的配置,格式为(数据库:///用户名)\r\n\r\n#db = SQLAlchemy(app)\r\n# initalize app with database\r\ndb.init_app(app)\r\n\r\n\r\n@app.before_first_request\r\ndef before_first_request_func():\r\n db.create_all()\r\n\r\n#这个功能的作用就是在表单进行第一个命令之前,把db先建了;然后U could delete this function after first-run\r\n\r\n@app.route('/download/')\r\ndef downloadFile(filename):\r\n path = os.path.join(app.config['UPLOAD_FOLDER'], filename)\r\n return send_file(path, as_attachment=True)\r\n#为了下载文件,记得在app.py里from flask import send_file\r\n\r\n@app.route(\"/register\", methods=['Get', 'Post'])\r\ndef register():\r\n form = RegisterForm()\r\n if form.validate_on_submit():\r\n try:\r\n user = User(form.name.data, form.email.data, form.password.data, form.age.data,form.gender.data,form.feedback.data,\r\n form.experience.data,form.checkbox.data)\r\n\r\n with sqlite3.connect(DATABASE) as con:\r\n cur = con.cursor()\r\n cur.execute(\"INSERT INTO user1 (name,email,password,age,gender,feedback,experience,checkbox) VALUES (?,?,?,?,?,?,?,?)\",\r\n (user.name, user.email, user.password, user.age,user.gender,user.feedback,user.experience,user.checkbox))\r\n con.commit()\r\n flash('Registered Successfully!', 'success')\r\n return redirect(url_for('login'))\r\n except Exception as e:\r\n con.rollback()\r\n flash(f'Unknow error!\\n{str(e)}', 'danger')\r\n return render_template('assignment7.html', title='Register', form=form)\r\n\r\n@app.route(\"/login\", methods=['Get', 'Post'])\r\ndef login():\r\n form = LoginForm()\r\n if form.validate_on_submit():\r\n try:\r\n with sqlite3.connect(DATABASE) as con:\r\n cur = con.cursor()\r\n cur.execute(\"SELECT * FROM user1 WHERE email = ? and password = ?\",\r\n [form.email.data, form.password.data])\r\n rows = cur.fetchall()\r\n print(rows) \r\n if rows:\r\n flash('Login Successfully!', 'success')\r\n return render_template('display.html', rows=rows)\r\n else:\r\n flash(\r\n 'Login Unsuccessful. 
Please check email and password', 'danger')\r\n except Exception as e:\r\n con.rollback()\r\n flash(f'Unknow error!\\n{str(e)}', 'danger')\r\n return render_template('loginn.html', title='Login', form=form)\r\n\r\n@app.route(\"/display\")\r\ndef display():\r\n with sqlite3.connect(DATABASE) as con:\r\n cur = con.cursor()\r\n cur.execute(\"SELECT * FROM user1\")\r\n rows = cur.fetchall()\r\n print(rows)\r\n return render_template('display.html', title='Login', rows=rows)\r\n\r\n@app.route(\"/\")\r\ndef sichen(): #function name\r\n return render_template('sichen.html', title=\"Sichen\") \r\n'''\r\n@app.route(\"/tags\")\r\ndef tags():\r\n return render_template(\"Main page.html\", title=\"tags\")\r\n\r\n@app.route(\"/register\", methods=['Get','Post'])\r\ndef register():\r\n user = None\r\n if request.method == \"POST\":\r\n name = request.form['name']\r\n email = request.form['email']\r\n password = request.form['password']\r\n user = User(name, email, password) #build a model to store data\r\n return render_template(\"register.html\", tile =\"Register\", user=user)\r\n\r\n@app.route(\"/sorting\")\r\ndef sorting():\r\n return render_template(\"sorting.html\", title=\"Sorting\")\r\n\r\n@app.route(\"/link\")\r\ndef link():\r\n return render_template(\"Link.html\", title=\"7.LINK\")\r\n\r\n@app.route(\"/reload\")\r\ndef reload():\r\n return render_template(\"Reload.html\", title=\"1.Reload\")\r\n\r\n@app.route(\"/css_style\")\r\ndef css_style():\r\n return render_template(\"CSS Style.html\", title=\"3.CSS STYLE\")\r\n\r\n@app.route(\"/calculater\")\r\ndef calculater():\r\n return render_template(\"Working_Calculater.html\", title=\"Calculater\")\r\n\r\n@app.route(\"/Swap_numbers\")\r\ndef Swap_numbers():\r\n return render_template(\"Swap_numbers.html\", title=\"Swap_numbers\")\r\n\r\n@app.route(\"/onlineshop\")\r\ndef onlineshop():\r\n return render_template(\"OnlineShop.html\", title=\"OnlineShop\")\r\n\r\n@app.route(\"/dress\")\r\ndef dress():\r\n return render_template(\"Dress.html\", title=\"$100-$800\")\r\n\r\n@app.route(\"/coat\")\r\ndef coat():\r\n return render_template(\"Coat.html\", title=\"$1200-$3000\")\r\n\r\n@app.route(\"/trousers\")\r\ndef trousers():\r\n return render_template(\"Trousers.html\", title=\"$150-$1800\")\r\n\r\n@app.route(\"/suit\")\r\ndef suit():\r\n return render_template(\"suit.html\", title=\"$1200-$5000\")\r\n\r\n@app.route(\"/windbreaker\")\r\ndef windbreaker():\r\n return render_template(\"Windbreaker.html\", title=\"$2500-$9000\")\r\n\r\n@app.errorhandler(404)\r\ndef page_not_found(e):\r\n return render_template('404.html'), 404\r\n\r\n@app.route(\"/login\")\r\ndef login():\r\n return render_template(\"login.html\")\r\n\r\n@app.route(\"/linear_regression\")\r\ndef linear_regression():\r\n return render_template(\"Linear_regression.html\")\r\n\r\n@app.route(\"/forums\")\r\ndef forums():\r\n return render_template(\"Forums.html\")\r\n'''\r\n@app.route(\"/profile\", methods=[\"Get\", \"Post\"])\r\n#get是首次获取表单的方法,上面数值为空(默认值),获取的URL上也没有数值;\r\n#post是用户提交信息时获取表单的方法,上面有数值,但不会反映在URL上,所以URL始终不变\r\ndef profile():\r\n my_form = ProfileForm() #初始化表单\r\n my_data = Profile() #初始化数据,没这行,数据无法放在form中,给该表单提交的数据起名为my_data,即first_name那列数据就叫my_data.first_name\r\n my_data.remove_none_values() #call the function\r\n if my_form.validate_on_submit(): #意思是如果表单提交成功\r\n my_data.first_name = request.form.get('first_name')\r\n my_data.last_name = request.form.get('last_name')\r\n #print(\"first_name\", my_data.first_name)\r\n #print(\"last_name\", my_data.last_name)\r\n file = 
request.files.get('file_photo')\r\n if file:\r\n orig_filename = secure_filename(file.filename)\r\n new_filename = str(uuid.uuid1())#生成uuid\r\n my_data.file_photo_filename = orig_filename #To save the orignal file name\r\n my_data.file_photo_code = new_filename \r\n #上面这行是为了存储uuid,目的是如果不同用户上传的不用文件,起了同一个名,靠uuid来区分\r\n\r\n # save to upload folder\r\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], new_filename))\r\n \r\n #save to database\r\n db.session.add(my_data)\r\n db.session.commit()\r\n print(\"my_data\", my_data.id)\r\n\r\n #redirect to display page\r\n return redirect('/profile/' + str(my_data.id)) #这个意思是每个数据的特定URL,比如profile/5...\r\n\r\n return render_template(\"profile.html\", my_form = my_form, my_data = my_data)\r\n\r\n\r\n@app.route(\"/profile/\", methods=[\"Get\"])#即每个上传的数据都产出特定的URL\r\ndef profile_by_id(id): #这个function必须叫这个名,不然不能RUN出来\r\n my_data = Profile.query.filter_by(id=id).first()#只返回出每个id的第一个数据,其实也就一个\r\n if my_data == None:\r\n abort(404) #记得要from flask import abort;这个功能是调用错误界面的\r\n return render_template(\"profile_view.html\", my_data = my_data)\r\n\r\n\r\n@app.route(\"/tables\")\r\ndef tables():\r\n my_data = Sales.query.all()\r\n chart_data = []\r\n chart_data2 = []\r\n for sal in my_data:\r\n chart_data.append(sal.to_x_y()) #append()函数是在相应的数组后加上元素,后面sal是定义好的类,to_x_y是在model.py里sal类下面定义的function\r\n chart_data2.append(sal.to_x_y2())\r\n return render_template('table_data.html', my_data=my_data, chart_data = chart_data, chart_data2=chart_data2)#pass data,一般会让等号两边的名字一样\r\n\r\n@app.route(\"/tables2\")\r\ndef tables2():\r\n my_data = Sales.query.all()\r\n chart_data = []\r\n chart_data2 = []\r\n for sal in my_data:\r\n chart_data.append(sal.to_x_y()) #append()函数是在相应的数组后加上元素,后面sal是定义好的类,to_x_y是在model.py里sal类下面定义的function\r\n chart_data2.append(sal.to_x_y2())\r\n return render_template('table2_data.html', my_data=my_data, chart_data = chart_data, chart_data2=chart_data2)#pass data,一般会让等号两边的名字一样\r\n\r\n@app.route(\"/tables/\")\r\ndef tables_by_id(id): \r\n my_data = Tables.query.filter_by(id=id).first()\r\n if my_data == None:\r\n abort(404)\r\n return render_template(\"table_data.html\", my_data=my_data)\r\n'''\r\n@app.route(\"/employee\", methods=[\"Get\", \"Post\"])\r\ndef employee():\r\n my_form = EmployeeForm()\r\n if my_form.validate_on_submit():\r\n file_csv = request.files.get('file_csv')\r\n if file_csv:\r\n file_full_path = (os.path.join(app.config['UPLOAD_FOLDER'], file_csv.filename))\r\n file_csv.save(file_full_path) #save to the upload folder\r\n #load the data in the table using pandas\r\n df = pd.read_csv(file_full_path) #read the file through pd(pandas)\r\n employee_list_raw = df.to_dict('records')#意为整体构成一个列表,内层是将原始数据的每行提取出来形成字典\r\n employee_list = []\r\n for curr_emp in employee_list_raw:\r\n emp = Employee.from_dict(curr_emp)\r\n #emp.employee_id = curr_emp['EMPLOYEE_ID'] 这行里前面小写的是match model的Employee,后面大写的是match file里列名\r\n employee_list.append(emp) #()方法向列表末尾添加新的对象(元素)\r\n db.session.bulk_save_objects(employee_list)#批量save to db\r\n # db.session.add(emp) \r\n # 上面是数据添加到db里的employee table\r\n db.session.commit() #提交保存到数据库\r\n return render_template('employee.html', my_form=my_form)\r\n '''\r\n@app.route(\"/employee\", methods=['Get', 'Post'])\r\ndef employee():\r\n my_form = EmployeeForm()\r\n # convert to list\r\n\r\n if my_form.validate_on_submit(): # my_form.submitted()\r\n # file we are importing\r\n file_csv = request.files.get('file_csv')\r\n\r\n if file_csv:\r\n file_full_path = os.path.join(\r\n app.config['UPLOAD_FOLDER'], 
file_csv.filename)\r\n # print(\"file_full_path\", file_full_path)\r\n\r\n # save to upload folder\r\n file_csv.save(file_full_path)\r\n\r\n # load the data in the table using pandas\r\n df = pd.read_csv(file_full_path)\r\n\r\n # print(\"raw_data\", df.iloc[0])\r\n\r\n # print(\"shape\", df.shape)\r\n employee_list_raw = df.to_dict('records')\r\n\r\n # print(\"dictionary\", employee_list_raw)\r\n\r\n employee_list = []\r\n for curr_emp in employee_list_raw:\r\n emp = Employee.from_dict(curr_emp)\r\n employee_list.append(emp)\r\n # db.session.add(emp)\r\n # db.session.commit()\r\n\r\n print(\"employee_list_count\", len(employee_list))\r\n\r\n # save t0 DB\r\n db.session.bulk_save_objects(employee_list)\r\n db.session.commit()\r\n\r\n # test query\r\n e_list = Employee.query.limit(5).all()\r\n print(\"*******\")\r\n print(e_list)\r\n print(\"*******\")\r\n\r\n # send us to the display page\r\n # return redirect(\"/employee/\" + str(my_data.id))\r\n\r\n return render_template('employee.html', my_form=my_form)\r\n\r\n\r\n@app.route(\"/sales\", methods=['Get', 'Post'])\r\ndef sales():\r\n my_form = SalesForm()\r\n\r\n if my_form.validate_on_submit(): \r\n file_csv = request.files.get('file_csv')\r\n if file_csv:\r\n file_full_path = os.path.join(\r\n app.config['UPLOAD_FOLDER'], file_csv.filename)\r\n file_csv.save(file_full_path)\r\n\r\n # load the data in the table using pandas\r\n df = pd.read_csv(file_full_path)\r\n\r\n sales_list_raw = df.to_dict('records')\r\n\r\n sales_list = []\r\n for curr_sal in sales_list_raw:\r\n sal = Sales.from_dict(curr_sal)\r\n sales_list.append(sal)\r\n print(\"sales_list_count\", len(sales_list))\r\n\r\n db.session.bulk_save_objects(sales_list)\r\n db.session.commit()\r\n\r\n return render_template('Sales.html', my_form=my_form)\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)\r\n","sub_path":"Car-Purchase-Analysis/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":13657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"454325457","text":"from flask import Flask, render_template, request\nimport alpaca_trade_api as tradeapi\nfrom config import *\nimport requests, json\n\napp = Flask(__name__)\n\napi = tradeapi.REST(API_KEY, SECRET_KEY, base_url=BASE_URL)\n\n@app.route('/')\ndef dashboard():\n orders = api.list_orders(status='all', limit=50)\n positions = api.list_positions()\n\n return render_template('dashboard.html', alpaca_orders=orders, alpaca_positions=positions)\n\n@app.route('/webhook', methods=['POST'])\ndef webhook():\n webhook_message = json.loads(request.data)\n\n if webhook_message['passphrase'] != WEBHOOK_PASSPHRASE:\n return \"no\"\n\n price = webhook_message['strategy']['order_price']\n quantity = webhook_message['strategy']['order_contracts']\n symbol = webhook_message['ticker']\n side = webhook_message['strategy']['order_action']\n\n order = api.submit_order(symbol, quantity, side, 'limit', 'gtc', limit_price=price)\n\n chat_message = {\n \"content\":f\"tradingview strategy alert triggered: {quantity} {symbol} @ {price}\"\n }\n\n requests.post(DISCORD_URL, json=chat_message)\n\n return webhook_message\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"477132877","text":"# edge case: \"a\"\n\n# method 3: better template of sliding window\nfrom collections import defaultdict\nclass Solution(object):\n def 
lengthOfLongestSubstring(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n if not s:\n return 0\n left = 0\n window = defaultdict(int)\n res = 0\n for right, c in enumerate(s):\n window[c] += 1\n while window[c] > 1:\n window[s[left]] -= 1\n if window[s[left]] == 0:\n del window[s[left]]\n left += 1\n res = max(res, right-left+1)\n return res\n \n\n\n# method 2: sliding window, record the last appeared index of all characters\n# # watch out: characters saved in last_appeared might not be in the window!!!\n# time/space O(n)\nclass Solution(object):\n def lengthOfLongestSubstring(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n start, end = 0, 0 # window is [start, end)\n n = len(s)\n last_appeared = {} # {character: last appeared index}, \n longest = 0\n while start < n and end < n:\n if s[end] in last_appeared:\n longest = max(end - start, longest)\n start = max(start, last_appeared[s[end]] + 1) # max is needed\n last_appeared[s[end]] = end\n end += 1\n longest = max(longest, end - start) # easy to forget!\n return longest\n\n \n# Method 1: brute force, check every string starting from s[i]\n# O(n^2), Time Limit Exceeded\nclass Solution1(object):\n def lengthOfLongestSubstring(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\" \n ans = 0\n for i in range(len(s)):\n _set = set()\n for j in range(i, len(s)):\n if s[j] not in _set:\n _set.add(s[j])\n else:\n break\n ans = max(ans, len(_set))\n return ans\n\n\"\"\"\nGiven a string, find the length of the longest substring without repeating characters.\n\nExample 1:\n\nInput: \"nabcabcnbb\"\nOutput: 4 \nExplanation: The answer is \"abcn\", with the length of 4. \n\"\"\"\n","sub_path":"0003. Longest Substring Without Repeating Characters.py","file_name":"0003. Longest Substring Without Repeating Characters.py","file_ext":"py","file_size_in_byte":2191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"34455164","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom django.views import generic\nfrom .forms import ClothesCreateForm, OutfitCreateForm, ClothesColorFormset\nfrom accounts.models import User\nfrom .models import ParentCategory, Category, Clothes, Outfit, ClothesColor\nfrom django.urls import reverse_lazy\nfrom django.http import JsonResponse, QueryDict\nfrom django.utils import timezone\nfrom django.conf import settings\n\n# Create your views here.\n\n\nclass CreateClothes(LoginRequiredMixin, generic.CreateView):\n model = Clothes\n form_class = ClothesCreateForm\n template_name = \"closet/add_clothes.html\"\n\n def form_valid(self, form):\n clothes = form.save(commit=False)\n # Add user and date created to a clothes\n user = self.request.user\n clothes.owner = user\n clothes.created_at = timezone.now()\n\n # Name a clothes if user doesn't\n if not clothes.name:\n category = clothes.category\n count_clothes = user.clothes.filter(category=category).count()\n clothes.name = f\"{category} {count_clothes + 1}\"\n\n clothes.save()\n\n # Get cropping positions\n x = float(self.request.POST.get(\"x\"))\n y = float(self.request.POST.get(\"y\"))\n w = float(self.request.POST.get(\"width\"))\n h = float(self.request.POST.get(\"height\"))\n\n # Crop if the cropping positions\n if x != 0 and y != 0 and w != 0 and h != 0:\n clothes.crop_picture(x, y, w, h)\n clothes.extract_color()\n else:\n # 
Create 3 color objects if no cropped image and colors\n if not clothes.colors.all():\n for i in range(3):\n ClothesColor.objects.create(\n clothes=clothes, code=\"rgba(255, 255, 255, 1)\"\n )\n\n messages.info(\n self.request, f\"Added {clothes.name} successfully.\",\n )\n return redirect(\"closet:clothes\")\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"add_or_edit\"] = \"Add\"\n return context\n\n\n# def add_clothes(request):\n# form = ClothesCreateForm(\n# request.POST or None, files=request.FILES\n# ) # request.FILESが必要\n# context = {\"form\": form, \"add_or_edit\": \"Add\"}\n#\n# if request.method == \"POST\" and form.is_valid():\n# clothes = form.save(commit=False)\n# Add user and date created in a clothes\n# user = self.request.user\n# clothes.owner = user\n# clothes.created_at = timezone.now()\n#\n# if not clothes.name:\n# category = clothes.category\n# count_clothes = user.clothes.filter(category=category).count()\n# clothes.name = f\"{category} {count_clothes + 1}\"\n#\n# clothes.save()\n#\n# x = float(self.request.POST.get(\"x\"))\n# y = float(self.request.POST.get(\"y\"))\n# w = float(self.request.POST.get(\"width\"))\n# h = float(self.request.POST.get(\"height\"))\n#\n# clothes.crop_picture(x, y, w, h)\n# clothes.extract_color()\n#\n# messages.info(\n# self.request,\n# f\"{clothes.owner.username} added {clothes.name} successfully.\",\n# )\n#\n# formset = ColorFormset(request.POST, instance=clothes)\n# if formset.is_valid():\n# formset.save()\n# return redirect(\"app:index\")\n#\n# # エラーメッセージつきのformsetをテンプレートへ渡すため、contextに格納\n# else:\n# context[\"formset\"] = formset\n#\n# return redirect(\"closet:clothes\")\n#\n# # GETのとき\n# else:\n# # 空のformsetをテンプレートへ渡す\n# context[\"formset\"] = ColorFormset()\n#\n# return render(request, \"closet/add_clothes.html\", context)\n\n\n@login_required\ndef edit_clothes(request, pk):\n clothes = get_object_or_404(Clothes, pk=pk)\n form = ClothesCreateForm(\n request.POST or None, files=request.FILES or None, instance=clothes\n )\n formset = ClothesColorFormset(\n request.POST or None,\n instance=clothes,\n initial=clothes.colors.order_by(\"pk\").all(),\n )\n\n if request.method == \"POST\" and form.is_valid() and formset.is_valid():\n saved_clothes = form.save()\n x = float(request.POST.get(\"x\"))\n y = float(request.POST.get(\"y\"))\n w = float(request.POST.get(\"width\"))\n h = float(request.POST.get(\"height\"))\n\n if x != 0 and y != 0 and w != 0 and h != 0:\n saved_clothes.crop_picture(x, y, w, h)\n saved_clothes.extract_color()\n\n messages.info(\n request, f\"Updated {saved_clothes.name} successfully.\",\n )\n # for instance in formset.save(commit=False):\n # # ... 
do something with m2m relationships ...\n\n # Save the order of a formset of clothes colors\n for ordered_form in formset.ordered_forms:\n ordered_form.instance.order = ordered_form.cleaned_data[\"ORDER\"]\n ordered_form.instance.save()\n # return redirect(\"closet:edit_clothes\", pk=pk)\n return redirect(\"closet:clothes\")\n\n context = {\n \"form\": form,\n \"formset\": formset,\n \"clothes\": clothes,\n \"add_or_edit\": \"Edit\",\n }\n\n return render(request, \"closet/add_clothes.html\", context)\n\n\nclass UserClothes(LoginRequiredMixin, generic.ListView):\n template_name = \"closet/clothes_list.html\"\n\n def post(self, request):\n clothes_pks = request.POST.getlist(\"delete\")\n Clothes.objects.filter(pk__in=clothes_pks).delete()\n return redirect(\"closet:clothes\")\n\n def get_queryset(self):\n return self.request.user.clothes.order_by(\"-id\").all()\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"who\"] = self.request.user.username\n return context\n\n\nclass PublishedClothes(generic.DetailView):\n model = User\n template_name = \"closet/clothes_list.html\"\n\n slug_field = \"username\"\n slug_url_kwarg = \"username\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n user = User.objects.get(username=self.kwargs[\"username\"])\n context[\"object_list\"] = user.clothes.filter(publish=True).all()\n context[\"who\"] = self.kwargs[\"username\"]\n return context\n\n\n# class EditClothes(LoginRequiredMixin, generic.UpdateView):\n# model = Clothes\n# form_class = ClothesCreateForm\n# template_name = \"closet/add_clothes.html\"\n#\n# def form_valid(self, form):\n# clothes = form.save()\n#\n# x = float(self.request.POST.get(\"x\"))\n# y = float(self.request.POST.get(\"y\"))\n# w = float(self.request.POST.get(\"width\"))\n# h = float(self.request.POST.get(\"height\"))\n#\n# clothes.crop_picture(x, y, w, h)\n# clothes.extract_color()\n#\n# messages.info(\n# self.request,\n# f\"{clothes.owner.username} updated {clothes.name} successfully.\",\n# )\n# return redirect(\"closet:clothes\")\n#\n# def get_context_data(self, **kwargs):\n# context = super().get_context_data(**kwargs)\n# context[\"add_or_edit\"] = \"Edit\"\n# return context\n\n\nclass CreateOutfit(LoginRequiredMixin, generic.CreateView):\n model = Outfit\n form_class = OutfitCreateForm\n template_name = \"closet/set_outfit.html\"\n\n def get_form_kwargs(self):\n kwargs = super(CreateOutfit, self).get_form_kwargs()\n kwargs[\"user\"] = self.request.user\n return kwargs\n\n # Add user and date created for an outfit\n def form_valid(self, form):\n outfit = form.save(commit=False)\n user = self.request.user\n outfit.owner = user\n outfit.created_at = timezone.now()\n outfit.set_name()\n outfit.save()\n return redirect(\"closet:outfits\")\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"set_or_edit\"] = \"Set\"\n return context\n\n\nclass UserOutfits(LoginRequiredMixin, generic.ListView):\n template_name = \"closet/outfits_list.html\"\n\n def post(self, request):\n outfits_pks = request.POST.getlist(\"delete\")\n Outfit.objects.filter(pk__in=outfits_pks).delete()\n return redirect(\"closet:outfits\")\n\n def get_queryset(self):\n return self.request.user.outfits.all()\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"who\"] = self.request.user.username\n return context\n\n\nclass PublishedOutfits(generic.DetailView):\n model = User\n 
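# show another user's published outfits with the shared outfits list template\n 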
template_name = \"closet/outfits_list.html\"\n\n slug_field = \"username\"\n slug_url_kwarg = \"username\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n user = User.objects.get(username=self.kwargs[\"username\"])\n context[\"object_list\"] = user.outfits.filter(publish=True).all()\n context[\"who\"] = self.kwargs[\"username\"]\n return context\n\n\nclass EditOutfit(LoginRequiredMixin, generic.UpdateView):\n model = Outfit\n form_class = OutfitCreateForm\n success_url = reverse_lazy(\"closet:outfits\")\n template_name = \"closet/set_outfit.html\"\n\n def get_form_kwargs(self):\n kwargs = super(EditOutfit, self).get_form_kwargs()\n kwargs[\"user\"] = self.request.user\n return kwargs\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"set_or_edit\"] = \"Edit\"\n return context\n","sub_path":"services/web/closet/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"466867597","text":"import urllib.parse\n\nfrom craton.tests.functional import TestCase\n\n\nclass RegionTests(TestCase):\n def setUp(self):\n super(RegionTests, self).setUp()\n self.cloud = self.create_cloud()\n\n def create_cloud(self):\n return super(RegionTests, self).create_cloud(\n name='cloud-1',\n variables={'version': 'x'},\n )\n\n def create_region(self, name, variables=None):\n return super(RegionTests, self).create_region(\n name=name,\n cloud=self.cloud,\n variables=variables\n )\n\n\nclass APIV1RegionTest(RegionTests):\n \"\"\"Test cases for /region calls.\n One set of data for the test is generated by fake data generateion\n script during test module setup.\n \"\"\"\n\n def test_create_region_full_data(self):\n # Test with full set of allowed parameters\n values = {\"name\": \"region-new\",\n \"note\": \"This is region-new.\",\n \"cloud_id\": self.cloud['id'],\n \"variables\": {\"a\": \"b\"}}\n url = self.url + '/v1/regions'\n resp = self.post(url, data=values)\n self.assertEqual(201, resp.status_code)\n self.assertIn('Location', resp.headers)\n self.assertEqual(\n resp.headers['Location'],\n \"{}/{}\".format(url, resp.json()['id'])\n )\n self.assertEqual(values['name'], resp.json()['name'])\n\n def test_create_region_without_variables(self):\n values = {\"name\": \"region-two\",\n \"note\": \"This is region-two\",\n \"cloud_id\": self.cloud['id']}\n url = self.url + '/v1/regions'\n resp = self.post(url, data=values)\n self.assertEqual(201, resp.status_code)\n self.assertIn('Location', resp.headers)\n self.assertEqual(\n resp.headers['Location'],\n \"{}/{}\".format(url, resp.json()['id'])\n )\n self.assertEqual(\"region-two\", resp.json()['name'])\n\n def test_create_region_with_no_name_fails(self):\n values = {\"note\": \"This is region one.\", \"cloud_id\": self.cloud['id']}\n url = self.url + '/v1/regions'\n resp = self.post(url, data=values)\n self.assertEqual(resp.status_code, 400)\n err_msg = (\n \"The request included the following errors:\\n\"\n \"- 'name' is a required property\"\n )\n self.assertEqual(resp.json()['message'], err_msg)\n\n def test_create_region_with_no_cloud_id_fails(self):\n values = {\"name\": \"I don't work at all, you know.\"}\n url = self.url + '/v1/regions'\n resp = self.post(url, data=values)\n self.assertEqual(resp.status_code, 400)\n err_msg = (\n \"The request included the following errors:\\n\"\n \"- 'cloud_id' is a required property\"\n )\n 
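# the error message should name the missing required field\n 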
self.assertEqual(resp.json()['message'], err_msg)\n\n def test_create_region_with_duplicate_name_fails(self):\n self.create_region(\"ORD135\")\n\n values = {\"name\": \"ORD135\", \"cloud_id\": self.cloud['id']}\n url = self.url + '/v1/regions'\n resp = self.post(url, data=values)\n self.assertEqual(409, resp.status_code)\n\n def test_create_region_with_extra_id_property_fails(self):\n values = {\"name\": \"test\", 'cloud_id': self.cloud['id'], \"id\": 101}\n url = self.url + '/v1/regions'\n resp = self.post(url, data=values)\n self.assertEqual(resp.status_code, 400)\n msg = (\n \"The request included the following errors:\\n\"\n \"- Additional properties are not allowed ('id' was unexpected)\"\n )\n self.assertEqual(resp.json()['message'], msg)\n\n def test_create_region_with_extra_created_at_property_fails(self):\n values = {\"name\": \"test\", 'cloud_id': self.cloud['id'],\n \"created_at\": \"some date\"}\n url = self.url + '/v1/regions'\n resp = self.post(url, data=values)\n self.assertEqual(resp.status_code, 400)\n msg = (\n \"The request included the following errors:\\n\"\n \"- Additional properties are not allowed \"\n \"('created_at' was unexpected)\"\n )\n self.assertEqual(resp.json()['message'], msg)\n\n def test_create_region_with_extra_updated_at_property_fails(self):\n values = {\"name\": \"test\", 'cloud_id': self.cloud['id'],\n \"updated_at\": \"some date\"}\n url = self.url + '/v1/regions'\n resp = self.post(url, data=values)\n self.assertEqual(resp.status_code, 400)\n msg = (\n \"The request included the following errors:\\n\"\n \"- Additional properties are not allowed \"\n \"('updated_at' was unexpected)\"\n )\n self.assertEqual(resp.json()['message'], msg)\n\n def test_region_create_missing_all_properties_fails(self):\n url = self.url + '/v1/regions'\n region = self.post(url, data={})\n self.assertEqual(400, region.status_code)\n msg = (\n \"The request included the following errors:\\n\"\n \"- 'cloud_id' is a required property\\n\"\n \"- 'name' is a required property\"\n )\n self.assertEqual(region.json()['message'], msg)\n\n def test_regions_get_all(self):\n self.create_region(\"ORD1\")\n self.create_region(\"ORD2\")\n url = self.url + '/v1/regions'\n resp = self.get(url)\n self.assertEqual(200, resp.status_code)\n self.assertEqual(2, len(resp.json()))\n\n def test_regions_get_all_with_details(self):\n self.create_region('ORD1', variables={'a': 'b'})\n self.create_region('ORD2', variables={'c': 'd'})\n url = self.url + '/v1/regions?details=all'\n resp = self.get(url)\n self.assertEqual(200, resp.status_code)\n regions = resp.json()['regions']\n self.assertEqual(2, len(regions))\n for region in regions:\n self.assertTrue('variables' in region)\n for region in regions:\n if region['name'] == 'ORD1':\n self.assertEqual({'a': 'b', 'version': 'x'},\n region['variables'])\n if region['name'] == 'ORD2':\n self.assertEqual({'c': 'd', 'version': 'x'},\n region['variables'])\n\n def test_regions_get_all_with_name_filter(self):\n self.create_region(\"ORD1\")\n self.create_region(\"ORD2\")\n url = self.url + '/v1/regions?name=ORD1'\n resp = self.get(url)\n self.assertEqual(200, resp.status_code)\n regions = resp.json()['regions']\n self.assertEqual(1, len(regions))\n self.assertEqual('ORD1', regions[0]['name'])\n\n def test_regions_get_all_for_cloud(self):\n for i in range(2):\n self.create_region(\"ORD{}\".format(str(i)))\n url = self.url + '/v1/regions?cloud_id={}'.format(self.cloud['id'])\n resp = self.get(url)\n self.assertEqual(200, resp.status_code)\n regions = 
resp.json()['regions']\n self.assertEqual(2, len(regions))\n self.assertEqual(['ORD0', 'ORD1'], [r['name'] for r in regions])\n\n def test_region_with_non_existing_filters(self):\n self.create_region(\"ORD1\")\n url = self.url + '/v1/regions?name=idontexist'\n resp = self.get(url)\n self.assertEqual(404, resp.status_code)\n\n def test_region_get_details_for_region(self):\n regvars = {\"a\": \"b\", \"one\": \"two\"}\n region = self.create_region(\"ORD1\", variables=regvars)\n url = self.url + '/v1/regions/{}'.format(region['id'])\n resp = self.get(url)\n region = resp.json()\n self.assertEqual(region['name'], 'ORD1')\n\n def test_region_get_details_has_resolved_vars(self):\n regvars = {\"a\": \"b\", \"one\": \"two\"}\n region = self.create_region(\"ORD1\", variables=regvars)\n url = self.url + '/v1/regions/{}'.format(region['id'])\n resp = self.get(url)\n region = resp.json()\n self.assertEqual(region['name'], 'ORD1')\n expected = {\"a\": \"b\", \"one\": \"two\", \"version\": \"x\"}\n self.assertEqual(expected, region['variables'])\n\n def test_region_get_details_with_unresolved_vars(self):\n regvars = {\"a\": \"b\", \"one\": \"two\"}\n region = self.create_region(\"ORD1\", variables=regvars)\n r_id = region['id']\n url = self.url + '/v1/regions/{}?resolved-values=false'.format(r_id)\n resp = self.get(url)\n region = resp.json()\n self.assertEqual(region['name'], 'ORD1')\n self.assertEqual(regvars, region['variables'])\n\n\nclass TestPagination(RegionTests):\n\n def setUp(self):\n super(TestPagination, self).setUp()\n self.regions = [self.create_region('region-{}'.format(i))\n for i in range(0, 61)]\n self.addCleanup(self.delete_regions, self.regions)\n\n def test_list_first_thirty_regions(self):\n url = self.url + '/v1/regions'\n response = self.get(url)\n self.assertSuccessOk(response)\n json = response.json()\n self.assertIn('regions', json)\n self.assertEqual(30, len(json['regions']))\n self.assertListEqual([r['id'] for r in self.regions[:30]],\n [r['id'] for r in json['regions']])\n\n def test_get_returns_correct_next_link(self):\n url = self.url + '/v1/regions'\n thirtieth_region = self.regions[29]\n response = self.get(url)\n self.assertSuccessOk(response)\n json = response.json()\n self.assertIn('links', json)\n for link_rel in json['links']:\n if link_rel['rel'] == 'next':\n break\n else:\n self.fail(\"No 'next' link was returned in response\")\n\n parsed_next = urllib.parse.urlparse(link_rel['href'])\n self.assertIn('marker={}'.format(thirtieth_region['id']),\n parsed_next.query)\n\n def test_get_returns_correct_prev_link(self):\n first_region = self.regions[0]\n thirtieth_region = self.regions[29]\n url = self.url + '/v1/regions?marker={}'.format(thirtieth_region['id'])\n response = self.get(url)\n self.assertSuccessOk(response)\n json = response.json()\n self.assertIn('links', json)\n for link_rel in json['links']:\n if link_rel['rel'] == 'prev':\n break\n else:\n self.fail(\"No 'prev' link was returned in response\")\n\n parsed_prev = urllib.parse.urlparse(link_rel['href'])\n self.assertIn('marker={}'.format(first_region['id']),\n parsed_prev.query)\n\n def test_follow_all_region_links(self):\n url = self.url + '/v1/regions'\n response = self.get(url)\n self.assertSuccessOk(response)\n json = response.json()\n regions = json['regions']\n while regions:\n for link in json['links']:\n if link['rel'] == 'next':\n break\n else:\n break\n response = self.get(link['href'])\n self.assertSuccessOk(response)\n json = response.json()\n regions = 
json['regions']\n","sub_path":"craton/tests/functional/test_region_calls.py","file_name":"test_region_calls.py","file_ext":"py","file_size_in_byte":11050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"11535678","text":"from os import makedirs, listdir\nfrom os.path import dirname, join\nfrom languageflow.reader.tagged_corpus import TaggedCorpus\nimport argparse\nimport re\n\n\ndef preprocess(file):\n sentences = []\n for line in open(file):\n sentence = []\n line = line.strip()\n line = re.sub(r\"_+\", \"_\", line)\n if not line:\n continue\n tokens = line.strip().split(\" \")\n try:\n for token in tokens:\n if token.startswith(\"//\"):\n word = \"/\"\n tag = token[2:]\n else:\n word, tag = token.split(\"/\")\n word = word.replace(\"_\", \" \")\n sentence.append([word, tag])\n except:\n continue\n sentences.append(sentence)\n return sentences\n\n\ndef raw_to_corpus(sample, output):\n if output:\n output_folder = output\n else:\n output_folder = join(dirname(dirname(__file__)), \"data\", \"vlsp2013\", \"corpus\")\n try:\n makedirs(output_folder)\n except Exception as e:\n pass\n raw_names = [\"Trainset-POS-full\", \"Testset-POS\"]\n output_names = [\"train.txt\", \"test.txt\"]\n data_folder = join(dirname(dirname(__file__)), \"data\", \"vlsp2013\", \"raw\")\n for i, raw_folder in enumerate(raw_names):\n tagged_corpus = TaggedCorpus()\n sentences = []\n files = listdir(join(data_folder, raw_folder))\n files = [join(data_folder, raw_folder, file) for file in files]\n for file in files:\n sentences += preprocess(file)\n if sample != None:\n if len(sentences) > sample:\n sentences = sentences[:sample]\n break\n tagged_corpus.sentences = sentences\n output_file = join(output_folder, output_names[i])\n tagged_corpus.save(output_file)\n print(\"{} sentences is saved to file {}\".format(len(sentences), output_file))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\"preprocess_vlsp2013.py\")\n parser.add_argument(\"--sample\", help=\"sample size\", type=int)\n parser.add_argument(\"--output\", help=\"output path\")\n args = parser.parse_args()\n if args.sample:\n if not args.output:\n parser.error(\"You must set --output when use option --sample\")\n raw_to_corpus(sample=args.sample, output=args.output)\n","sub_path":"util/preprocess_vlsp2013.py","file_name":"preprocess_vlsp2013.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"4592374","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 18 12:12:00 2019\n\n@author: brgupta\n\"\"\"\n\n# problem statement\n# https://www.hackerrank.com/challenges/list-comprehensions/problem\n\n\nif __name__ == '__main__':\n x = int(input())\n y = int(input())\n z = int(input())\n n = int(input())\n \n # ar = []\n # p = 0\n # for i in range (x+1):\n # for j in range(y+1):\n # for k in range(z+1):\n # if i+j+k !=n:\n # ar.append([])\n # ar[p] = [i,j,k]\n # p = p+1\n \n \n \n print([[i,j,k] for i in range(x+1) for j in range(y+1) for k in range(z+1) if ((i+j+k)!=n)])\n\n ","sub_path":"Python/list_comprehensions.py","file_name":"list_comprehensions.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"321452534","text":"from django.urls import reverse\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import Group\nfrom django.contrib.gis.geos 
import Polygon, MultiPolygon\nfrom django.core.files.uploadedfile import SimpleUploadedFile\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase\nfrom api.models import DataFormat, Product, ProductFormat, OrderItem, Order\nfrom api.tests.factories import BaseObjectsFactory, ExtractFactory\n\n\nUserModel = get_user_model()\n\n\nclass ProductGroupTests(APITestCase):\n \"\"\"\n Test Products and groups of products\n \"\"\"\n\n def setUp(self):\n self.config = BaseObjectsFactory(self.client)\n self.extract_config = ExtractFactory(self.client)\n\n user_extern_extract = UserModel.objects.create_user(\n username=\"extern_extract\",\n password=\"testPa$$word\"\n )\n user_extern_extract.groups.add(Group.objects.get(name='extract'))\n user_extern_extract.save()\n\n self.group = Product.objects.create(\n label=\"Cadastre souterrain\",\n pricing=self.config.pricings['free'],\n provider=self.extract_config.user,\n metadata=self.config.public_metadata,\n status=Product.ProductStatus.PUBLISHED\n )\n\n self.child_group = Product.objects.create(\n label=\"Réseau d'eau\",\n group=self.group,\n pricing=self.config.pricings['free'],\n provider=self.extract_config.user,\n metadata=self.config.public_metadata,\n status=Product.ProductStatus.PUBLISHED\n )\n self.formats = DataFormat.objects.bulk_create([\n DataFormat(name=\"DXF\"),\n DataFormat(name=\"DWG\"),\n ])\n self.products = Product.objects.bulk_create([\n Product(\n label=\"Réseau d'eau de la commune d'Ankh\",\n group=self.child_group,\n pricing=self.config.pricings['free'],\n provider=self.extract_config.user,\n metadata=self.config.public_metadata,\n geom=MultiPolygon(Polygon((\n (2537498, 1210000),\n (2533183, 1180000),\n (2520000, 1180000),\n (2520000, 1210000),\n (2537498, 1210000)\n ))),\n status=Product.ProductStatus.PUBLISHED\n ),\n Product(\n label=\"Réseau d'eau de la commune de Morpork\",\n group=self.child_group,\n pricing=self.config.pricings['free'],\n provider=user_extern_extract,\n metadata=self.config.public_metadata,\n geom=MultiPolygon(Polygon((\n (2533183, 1180000),\n (2537498, 1210000),\n (2550000, 1210000),\n (2550000, 1180000),\n (2533183, 1180000)\n ))),\n status=Product.ProductStatus.PUBLISHED_ONLY_IN_GROUP\n ),\n Product(\n label=\"Réseau d'eau du Klatch\",\n group=self.child_group,\n pricing=self.config.pricings['free'],\n provider=user_extern_extract,\n metadata=self.config.public_metadata,\n geom=MultiPolygon(Polygon.from_bbox((2564000, 1212000, 2570000, 1207000))),\n status=Product.ProductStatus.PUBLISHED_ONLY_IN_GROUP\n )\n ])\n ProductFormat.objects.bulk_create([\n ProductFormat(product=self.products[0], data_format=self.config.formats['dxf']),\n ProductFormat(product=self.products[1], data_format=self.config.formats['dwg']),\n ProductFormat(product=self.products[2], data_format=self.config.formats['dxf']),\n ])\n\n OrderItem.objects.create(\n order=self.config.order,\n price_status=OrderItem.PricingStatus.CALCULATED,\n product=self.group,\n data_format=DataFormat.objects.create(name=\"ZIP\"),\n )\n\n def test_products_are_visible(self):\n url = reverse('product-list')\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n self.assertEqual(len(response.data), 4, 'Check that all products are visible')\n\n def test_groups_are_expanded_when_confirmed(self):\n \"\"\"\n Client confirms an order with a `group` product.\n Each product in the group that intersects order geometry will be ready for extract.\n \"\"\"\n 
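# confirm the order as the regular client before switching to the extract users\n 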
self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.config.client_token)\n url = reverse('order-confirm', kwargs={'pk':self.config.order.id})\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED, response.content)\n\n # First Extract user\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.extract_config.token)\n url = reverse('extract_order')\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n self.assertEqual(len(response.data), 1, 'Response should have only one item')\n\n # Second Extract user\n url = reverse('token_obtain_pair')\n resp = self.client.post(\n url, {'username': 'extern_extract', 'password': 'testPa$$word'}, format='json')\n extern_token = resp.data['access']\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + extern_token)\n url = reverse('extract_order')\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n self.assertEqual(len(response.data), 1, 'Response should have only one item')\n\n def test_upload_file_with_multi_provider(self):\n \"\"\"\n First Extract finishes all its jobs while the second Extract hasn't read its orders yet.\n \"\"\"\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.config.client_token)\n url = reverse('order-confirm', kwargs={'pk':self.config.order.id})\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED, response.content)\n\n # First Extract user\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.extract_config.token)\n url = reverse('extract_order')\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n self.assertEqual(len(response.data), 1, 'Response should have only one item')\n\n order_item_id1 = response.data[0]['items'][0]['id']\n url = reverse('extract_orderitem', kwargs={'pk': order_item_id1})\n empty_zip_data = b'PK\x05\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\n extract_file = SimpleUploadedFile(\"result.zip\", empty_zip_data, content_type=\"multipart/form-data\")\n response = self.client.put(url, {'extract_result': extract_file, 'comment': 'ok'})\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED, response.content)\n self.assertEqual(\n Order.objects.get(pk=self.config.order.id).status,\n Order.OrderStatus.PARTIALLY_DELIVERED,\n \"Check order status is partially delivered\"\n )","sub_path":"back/api/tests/test_product_group.py","file_name":"test_product_group.py","file_ext":"py","file_size_in_byte":7481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"211505514","text":"from Tkinter import *\nimport re\nimport Tkinter\n\n\ntkinter_umlauts = []\n\nclass AutocompleteEntry(Entry):\n def __init__(self, lista,frame,strv,*args, **kwargs):\n \n Entry.__init__(self,frame, *args, **kwargs)\n self.lista = lista\n self._hits = lista \n self.frame = frame \n self.strv = strv\n self.var = self[\"textvariable\"] = StringVar()\n if self.var == '':\n self.var = self[\"textvariable\"] \n self.bind('<KeyRelease>', self.handle_keyrelease)\n #self.var.trace('w', self.changed)\n self.bind(\"<Right>\", self.selection)\n self.bind(\"<Up>\", self.up)\n self.bind(\"<Down>\", self.down)\n self.position = 0\n self.lb_up = False\n self._hit_index = 0\n self.position = 0\n 
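# _hits caches the current match list; _hit_index tracks the cycling position\n 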
self.focus()\n self.changed()\n\n def handle_keyrelease(self, event):\n \"\"\"event handler for the keyrelease event on this widget\"\"\"\n if event.keysym == \"BackSpace\":\n self.delete(self.index(Tkinter.INSERT), Tkinter.END)\n self.position = self.index(Tkinter.END)\n self._hits = self.comparison()\n if event.keysym == \"Left\":\n if self.position < self.index(Tkinter.END): # delete the selection\n self.delete(self.position, Tkinter.END)\n else:\n self.position = self.position - 1 # delete one character\n self.delete(self.position, Tkinter.END)\n if event.keysym == \"Right\":\n self.position = self.index(Tkinter.END) # go to end (no selection)\n if event.keysym == \"Down\":\n self.autocomplete(1) # cycle to next hit\n if event.keysym == \"Up\":\n self.autocomplete(-1) # cycle to previous hit\n if len(event.keysym) == 1 or event.keysym in tkinter_umlauts:\n self.autocomplete()\n self.changed()\n if event.keysym == \"Return\" :\n self.destroy()\n if hasattr(self, 'lb') :\n self.frame.destroy()\n self.lb.destroy()\n def autocomplete(self, delta=0):\n \"\"\"autocomplete the Entry, delta may be 0/1/-1 to cycle through possible hits\"\"\"\n if delta: # need to delete selection otherwise we would fix the current position\n self.delete(self.position, Tkinter.END)\n else: # set position to end so selection starts where textentry ended\n self.position = len(self.get())\n # collect hits\n _hits = self.comparison()\n # if we have a new hit list, keep this in mind\n if _hits != self._hits:\n self._hit_index = 0\n self._hits = _hits\n # only allow cycling if we are in a known hit list\n if _hits == self._hits and self._hits:\n self._hit_index = (self._hit_index + delta) % len(self._hits)\n # now finally perform the auto completion\n if self._hits:\n self.delete(0,Tkinter.END)\n self.insert(0,self._hits[self._hit_index])\n self.select_range(self.position,Tkinter.END)\n return _hits\n\n\n def changed(self): \n #self.delete(self.position, Tkinter.END)\n words = self._hits\n if not hasattr(self, 'lb') :\n self.lb = Listbox(self.frame)\n self.lb.grid(row=1,column=0,columnspan=10,rowspan=20,sticky=W + E + N + S)\n self.lb.bind(\"<Double-Button-1>\", self.selection)\n self.lb.bind(\"<Right>\", self.selection)\n self.lb_up = True\n self.lb.delete(0,END)\n for w in words:\n self.lb.insert(END,w)\n #else:\n # if self.lb_up:\n # self.lb.destroy()\n # self.frame.destroy()\n # self.lb_up = False\n \n def selection(self, event):\n\n if self.lb_up:\n self.var.set(self.lb.get(ACTIVE))\n self.strv.set(self.var.get())\n self.lb_up = False\n self.icursor(END)\n if hasattr(self, 'lb') :\n self.destroy()\n self.frame.destroy()\n self.lb.destroy()\n\n def up(self, event):\n\n if self.lb_up:\n if self.lb.curselection() == ():\n index = '0'\n else:\n index = self.lb.curselection()[0]\n if index != '0': \n self.lb.selection_clear(first=index)\n index = str(int(index) - 1) \n self.lb.selection_set(first=index)\n self.lb.activate(index) \n\n def down(self, event):\n\n if self.lb_up:\n if self.lb.curselection() == ():\n index = '0'\n else:\n index = self.lb.curselection()[0]\n if index != END: \n self.lb.selection_clear(first=index)\n index = str(int(index) + 1) \n self.lb.selection_set(first=index)\n self.lb.activate(index) \n\n def comparison(self):\n pattern = re.compile(self.var.get().lower() + '.*')\n return [w for w in self.lista if re.match(pattern, w.lower())]\n\nif __name__ == '__main__':\n root = Tk()\n\n lista = ['apple', 'apricot', 'banana'] # sample word list so the demo runs standalone\n entry = AutocompleteEntry(lista, root, StringVar())\n entry.grid(row=0, column=0)\n entry.focus()\n 
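# enter the Tk event loop; the bindings above drive completion, cycling and selection\n 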
root.mainloop()","sub_path":"AutoComplete.py","file_name":"AutoComplete.py","file_ext":"py","file_size_in_byte":5464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"31465353","text":"\"\"\"\nThis script creates snapshots for all your instances or a set\nof instances received as argument.\n\nIt uses boto just like ansible, and you don't need to specify\ncredentials. You can use multiple boto profiles as described here:\n\nhttp://boto.cloudhackers.com/en/latest/boto_config_tut.html\n\nArguments:\n ALL arguments are optional.\n -v Verbose mode\n -h help\n -i <instance_id1,instance_id2,...>\n -p <boto profile name>, default: default\n -n <1..1000>, number of snapshots to keep for each volume\n -l <1..1000>, number of hours that will elapse before creating a new snapshot\n\n\nExamples:\n python snapshot_instances.py\n This will snapshot all volumes of all instances, keeping 10 snapshots\n creating a new one every week\n\n python snapshot_instances.py -i i-342abc3\n This will snapshot all volumes of i-342abc3, keeping 10 snapshots\n creating a new one every week\n\n python snapshot_instances.py -i i-342abc3,i-342abc4,i-213abc2\n This will snapshot all volumes of the three given instances, keeping 10 snapshots\n creating a new one every week\n\n python snapshot_instances.py -p work -n 4 -l 168\n Snapshot all instances in your work profile, creating one snapshot\n per week, for 4 weeks\n\n python snapshot_instances.py -v\n Same as the first example, printing status as it goes.\n\"\"\"\n\n\nimport boto\nimport sys\nimport getopt\nimport datetime\nfrom datetime import timedelta\nimport dateutil.parser\nfrom dateutil import tz\n\n\nboto_profile = 'default'\nnumber_snapshots = 10\nlifetime = 24*7\n# lifetime is the number of hours minimum to create a new snapshot\n# defaults to 1 week\n\ninstance_ids = ''\naws_conn = None\nverbose = False\n\n\ndef parse_args(argv):\n global boto_profile, number_snapshots, lifetime, instance_ids, verbose\n try:\n opts, args = getopt.getopt(argv, \"p:n:l:i:vh\")\n except getopt.GetoptError:\n print(\"Error parsing options\")\n help()\n sys.exit(1)\n\n for opt, arg in opts:\n if opt == '-p':\n boto_profile = arg\n elif opt == '-h':\n help()\n sys.exit(0)\n elif opt == '-v':\n verbose = True\n elif opt == '-n':\n try:\n number_snapshots = int(arg)\n if number_snapshots < 1 or number_snapshots > 1000:\n number_snapshots = 10\n raise ValueError(\"Incorrect number of snapshots\")\n except:\n print(\"Incorrect number of snapshots, using default %s\" %\n number_snapshots)\n elif opt == '-l':\n try:\n lifetime = int(arg)\n if lifetime < 1 or lifetime > 1000:\n lifetime = 24\n raise ValueError(\"Incorrect lifetime(-l) value\")\n except:\n print(\"Invalid -l argument, using default %s\" % lifetime)\n elif opt == '-i':\n if ',' in arg:\n instance_ids = arg.split(',')\n else:\n instance_ids = [arg]\n else:\n print(\"Bad option: %s\" % opt)\n\n\ndef traverse_instances():\n global conn, instance_ids\n reservations = conn.get_all_reservations()\n for r in reservations:\n for i in r.instances:\n if i.id in instance_ids:\n instance_ids.remove(i.id)\n traverse_all_volumes(i)\n if len(instance_ids) > 0:\n print(\"\\nError: the following instance ids were NOT found: %s\" %\n instance_ids)\n\n\ndef traverse_all_instances():\n global conn\n reservations = conn.get_all_reservations()\n for r in reservations:\n for i in r.instances:\n traverse_all_volumes(i)\n\n\ndef traverse_all_volumes(instance_obj):\n global conn, verbose\n if verbose:\n if 'Name' in instance_obj.tags:\n print(\"Instance: %s, 
Name: %s\" %\n (instance_obj.id, instance_obj.tags['Name']))\n else:\n print(\"Instance: %s\" % instance_obj.id)\n volumes = conn.get_all_volumes(\n filters={'attachment.instance-id': instance_obj.id})\n for v in volumes:\n check_volume(v)\n\n\ndef check_volume(volume_obj):\n global conn, number_snapshots, verbose\n if verbose:\n if 'Name' in volume_obj.tags:\n print(\" Volume: %s, Name: %s\" %\n (volume_obj.id, volume_obj.tags['Name']))\n else:\n print(\" Volume: %s\" % volume_obj.id)\n\n# first delete snapshots if necessary\n snapshots = volume_obj.snapshots()\n ordered_snaps = sorted(snapshots,\n key=lambda vol: vol.start_time, reverse=True)\n if len(snapshots) >= number_snapshots:\n while (len(ordered_snaps) >= number_snapshots):\n del_snap = ordered_snaps.pop()\n print(\" Deleting snapshot: %s, description: %s\" % (\n del_snap.id, del_snap.description))\n del_snap.delete()\n\n now = datetime.datetime.now(tz.tzlocal())\n clock = now - timedelta(hours=lifetime)\n\n# create a snapshot if time is righ\n if len(ordered_snaps) > 0:\n newest = ordered_snaps.pop(0)\n start_time = dateutil.parser.parse(newest.start_time)\n\n if start_time < clock:\n snapshot_volume(volume_obj)\n elif verbose:\n print(\" Recent snapshot found: %s, %s\" %\n (newest.id, newest.description))\n# create snapshot if the instance doesn't have one\n else:\n snapshot_volume(volume_obj)\n\n\ndef snapshot_volume(volume_obj):\n now = datetime.datetime.now()\n desc = volume_obj.id + now.strftime(\"_%Y-%m-%d\")\n print(\" Creating snapshot %s\" % desc)\n volume_obj.create_snapshot(desc)\n\n\ndef help():\n global number_snapshots, lifetime, boto_profile\n print(\"\"\"\nUse: \n -n \n -l \n -p \n -i \n -v verbose mode\n\"\"\" % (number_snapshots, lifetime, boto_profile))\n\n\nif __name__ == \"__main__\":\n parse_args(sys.argv[1:])\n instances = 'all'\n if type(instance_ids) is list:\n instances = str(instance_ids)\n if verbose:\n print(\"\"\"Using boto profile: %s\nNumber of snapshots to keep for each volume: %s\nNumber of hours before creating a new snapshot: %s\nInstances to consider: %s\n\"\"\" % (boto_profile, number_snapshots, lifetime, instances))\n\n conn = boto.connect_ec2(profile_name=boto_profile)\n if len(instance_ids) > 0:\n traverse_instances()\n else:\n traverse_all_instances()\n","sub_path":"python/snapshot_instances.py","file_name":"snapshot_instances.py","file_ext":"py","file_size_in_byte":6575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"361162786","text":"import logging\n\nfrom decimal import Decimal, InvalidOperation\n\nfrom backbone_server.location.post import LocationPost\nfrom backbone_server.location.put import LocationPut\nfrom backbone_server.location.get import LocationGetById\nfrom backbone_server.location.gets import LocationsGet\nfrom backbone_server.location.delete import LocationDelete\nfrom backbone_server.location.get_by_name import LocationGetByPartnerName\nfrom backbone_server.location.get_by_gps import LocationGetByGPS\n\nfrom backbone_server.controllers.base_controller import BaseController\n\nfrom backbone_server.errors.duplicate_key_exception import DuplicateKeyException\nfrom backbone_server.errors.missing_key_exception import MissingKeyException\n\nfrom backbone_server.controllers.decorators import apply_decorators\n\n\n@apply_decorators\nclass LocationController(BaseController):\n\n def create_location(self, location, user=None, auths=None):\n \"\"\"\n create_location\n Create a location\n :param location:\n :type location: 
dict | bytes\n\n :rtype: Location\n \"\"\"\n\n retcode = 201\n loc = None\n\n try:\n post = LocationPost(self.get_connection())\n\n loc = post.post(location)\n except DuplicateKeyException as dke:\n logging.getLogger(__name__).debug(\n \"create_location: {}\".format(repr(dke)))\n retcode = 422\n\n return loc, retcode\n\n def delete_location(self, location_id, user=None, auths=None):\n \"\"\"\n deletes an location\n\n :param location_id: ID of location to fetch\n :type location_id: str\n\n :rtype: None\n \"\"\"\n\n delete = LocationDelete(self.get_connection())\n\n retcode = 200\n\n try:\n delete.delete(location_id)\n except MissingKeyException as dme:\n logging.getLogger(__name__).debug(\n \"delete_location: {}\".format(repr(dme)))\n retcode = 404\n\n return None, retcode\n\n def download_gps_location(self, latitude, longitude, user=None, auths=None):\n \"\"\"\n fetches location(s) by GPS\n Params must be string as negative numbers not handled - https://github.com/pallets/werkzeug/issues/729 - also want to avoid using float\n :param latitude: Latitude of location to fetch\n :type latitude: str\n :param longitude: Longitude of location to fetch\n :type longitude: str\n\n :rtype: Location\n \"\"\"\n\n get = LocationGetByGPS(self.get_connection())\n\n retcode = 200\n loc = None\n\n try:\n lat = Decimal(latitude)\n lng = Decimal(longitude)\n loc = get.get(lat, lng)\n except MissingKeyException as dme:\n logging.getLogger(__name__).debug(\n \"download_partner_location: {}\".format(repr(dme)))\n retcode = 404\n loc = str(dme)\n except InvalidOperation as nfe:\n logging.getLogger(__name__).debug(\n \"download_partner_location: {}\".format(repr(nfe)))\n retcode = 422\n loc = str(nfe)\n\n return loc, retcode\n\n def download_location(self, location_id, user=None, auths=None):\n \"\"\"\n fetches an location\n\n :param locationId: ID of location to fetch\n :type locationId: str\n\n :rtype: Location\n \"\"\"\n\n get = LocationGetById(self.get_connection())\n\n retcode = 200\n loc = None\n\n try:\n loc = get.get(location_id)\n except MissingKeyException as dme:\n logging.getLogger(__name__).debug(\n \"download_location: {}\".format(repr(dme)))\n retcode = 404\n loc = str(dme)\n\n return loc, retcode\n\n def download_locations(self, study_name=None, start=None, count=None, orderby=None, user=None,\n auths=None):\n \"\"\"\n fetches locations\n\n :param study_name: restrict to a particular study\n :type study_name: str\n :param start: for pagination start the result set at a record x\n :type start: int\n :param count: for pagination the number of entries to return\n :type count: int\n :param orderby: how to order the result set\n :type orderby: str\n\n :rtype: Locations\n \"\"\"\n\n get = LocationsGet(self.get_connection())\n\n retcode = 200\n loc = None\n\n loc = get.get(study_name, start, count, orderby)\n\n return loc, retcode\n\n def download_partner_location(self, partner_id, user=None, auths=None):\n \"\"\"\n fetches location(s) by partner name\n\n :param partner_id: ID of location to fetch\n :type partner_id: str\n\n :rtype: Locations\n \"\"\"\n\n get = LocationGetByPartnerName(self.get_connection())\n\n retcode = 200\n loc = None\n\n try:\n loc = get.get(partner_id)\n except MissingKeyException as dme:\n logging.getLogger(__name__).debug(\n \"download_partner_location: {}\".format(repr(dme)))\n retcode = 404\n loc = str(dme)\n\n return loc, retcode\n\n def update_location(self, location_id, location, user=None, auths=None):\n \"\"\"\n updates an location\n\n :param location_id: ID of location to 
update\n :type location_id: str\n :param location:\n :type location: dict | bytes\n\n :rtype: Location\n \"\"\"\n\n retcode = 200\n loc = None\n\n try:\n put = LocationPut(self.get_connection())\n\n loc = put.put(location_id, location)\n except DuplicateKeyException as dke:\n logging.getLogger(__name__).debug(\n \"update_location: {}\".format(repr(dke)))\n retcode = 422\n loc = str(dke)\n except MissingKeyException as dme:\n logging.getLogger(__name__).debug(\n \"update_location: {}\".format(repr(dme)))\n retcode = 404\n loc = str(dme)\n\n return loc, retcode\n","sub_path":"server/backbone_server/controllers/location_controller.py","file_name":"location_controller.py","file_ext":"py","file_size_in_byte":6040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"186656999","text":"m,n=map(int,input().split())\nseei,seej=set(),set()\nx,y,ctr=min(m,n),max(m,n),0\nfor i in range(1,y+1):\n for j in range(1,x+1):\n if (i+j)%5==0 and j not in seej and i not in seei:\n ctr+=1\n seej.add(j)\n seei.add(i)\nprint(ctr)","sub_path":"Practice/IntDivBy5.py","file_name":"IntDivBy5.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"466547027","text":"\nfrom collections import deque\n\n\ndef dfs(graph, start_node, visited=None):\n if not visited:\n visited = deque()\n\n if start_node not in visited:\n visited.append(start_node)\n\n neighbours = graph.get(start_node, [])\n\n for node in neighbours:\n dfs(graph, node, visited)\n return visited\n","sub_path":"graph/dfs.py","file_name":"dfs.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"550070406","text":"import asyncio\nimport aiohttp\nimport async_timeout\nimport tqdm\nimport os\n\nasync def download_coroutine(session, url, fname):\n sem = asyncio.Semaphore(20)\n async with sem:\n with async_timeout.timeout(10):\n if os.path.exists(str(fname)+\".png\"):\n return\n async with session.get(url) as response:\n filename = str(fname)+\".png\"\n with open(filename, 'wb') as f_handle:\n while True:\n chuck = await response.content.read(1024)\n if not chuck:\n break\n f_handle.write(chuck)\n return await response.release()\n\n\nasync def download(loop, urllist, d_desc):\n '''\n :param loop:\n :param urllist:\n :return null:\n '''\n async with aiohttp.ClientSession(loop=loop) as session:\n for (key, val) in tqdm.tqdm(urllist.items(), desc=d_desc, total=len(urllist)):\n await download_coroutine(session, val, key)\n\n\ndef main(downloadList, d_desc):\n loop = asyncio.get_event_loop()\n loop.run_until_complete(download(loop, downloadList, d_desc))\n\n\nif __name__ == '__main__':\n main({}, \"test\")  # smoke run with an empty download dict","sub_path":"asynMapLoader.py","file_name":"asynMapLoader.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"462307246","text":"import datetime\n\n\n# Implement every task as a separate function (extra helper functions may be created)\n#\n# 1. Write a function that takes as a parameter the name of a file with internet domain names (domains.txt)\n# and returns them as a list of strings (return the names without the dot).\n\n\ndef get_data(filename):\n with open(filename, \"r\") as txt_file:\n data = txt_file.readlines()\n return data\n\n\ndef get_domains_list(data):\n return [element[1:-1] for element in data]\n\n\nprint(get_domains_list(get_data(\"domains.txt\")))\n\n\n# 2. Write a function that takes a file name (names.txt) as a parameter\n# and returns a list of all surnames from it.\n# Each line of the file contains a number, a surname, a country and some number (the table is taken from Wikipedia).\n# The separator is the tab character \"\\t\"\n\n\ndef create_list_of_persons(data):\n names = []\n names_list = [element.split(\"\\t\") for element in data]\n for person in names_list:\n names.append(person[1])\n return names\n\n\nprint(create_list_of_persons(get_data(\"names.txt\")))\n\n\n# 3. Write a function that takes a file name (authors.txt) as a parameter and returns a list\n# of dictionaries of the form {\"date_original\": date_original, \"date_modified\": date_modified}\n# where date_original is the date from the line (if present)\n# and date_modified is the same date in \"dd/mm/yyyy\" format (d-day, m-month, y-year)\n# For example [{\"date_original\": \"8th February 1828\", \"date_modified\": 08/02/1828}, ...]\n\n\ndef format_date(data: list) -> list:\n dates = []\n for element in data:\n elements = element.split(' ')\n elements[0] = elements[0][0:-2]\n date_str = ' '.join(elements)\n try:\n date = datetime.datetime.strptime(date_str, \"%d %B %Y\").date()\n except:\n pass\n else:\n dates.append({\n 'date_original': element,\n 'date_modified': datetime.datetime.strftime(date, \"%d/%m/%Y\")\n })\n return dates\n\n\ndef process_data(data: list) -> list:\n split_list = []\n for line in data:\n items = line.split(\" - \")\n dates = format_date(items)\n split_list.extend(dates)\n return split_list\n\n\nprint(process_data(get_data(\"authors.txt\")))\n","sub_path":"HW_10/hw_10.py","file_name":"hw_10.py","file_ext":"py","file_size_in_byte":2854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"524091364","text":"import requests\nfrom bs4 import BeautifulSoup\n\ndef get_url_data(url):# fetch the raw page data\n headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'}\n reponse = requests.get(url, headers = headers)\n return reponse.content\n\ndef deal_url_data(byte_data):# parse the text with BeautifulSoup\n soup = BeautifulSoup(byte_data, 'lxml')\n soup1 = soup.find_all('table', class_=\"c-table opr-toplist1-table\")\n soup2 = soup1[0].find_all('a', target=\"_blank\")\n soup3 = soup1[0].find_all('td', class_=\"opr-toplist1-right\")\n for i in range(len(soup2)):\n print(str(i+1) + '.' 
+ soup2[i].string + ' ' + soup3[i].text)\n\n\nurl = 'https://www.baidu.com/s?ie=utf-8&f=8&rsv_bp=0&rsv_idx=1&tn=baidu&wd=%E6%8C%89%E6%97%B6&rsv_pq=cdf9649a0000a7ca&rsv_t=8a2c4MPWQ8TfqgPcYbPtieaKRshfRaBPxdW5dTlulEigBNgurRVEyaaTNPs&rqlang=cn&rsv_enter=0&rsv_sug3=3&rsv_sug1=3&rsv_sug7=100&inputT=2580&rsv_sug4=2580'\nbyte_data = get_url_data(url)\ndeal_url_data(byte_data)\n","sub_path":"杂项/百度热搜.py","file_name":"百度热搜.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"593650720","text":"# Definition for a binary tree node.\nclass TreeNode(object):\n\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution(object):\n arrayVal = []\n\n def inOrderTraverseValidation(self, node):\n if not node:\n return\n\n self.inOrderTraverseValidation(node.left)\n self.arrayVal.append(node.val)\n self.inOrderTraverseValidation(node.right)\n \n return\n\n def isValidBST(self, root):\n self.arrayVal = []\n self.inOrderTraverseValidation(root)\n print(self.arrayVal)\n\n for i in range(0, len(self.arrayVal) - 1):\n if self.arrayVal[i] >= self.arrayVal[i + 1]:\n return False\n return True\n\nHead = TreeNode(10)\nHead.left = TreeNode(5)\nHead.right = TreeNode(15)\nHead.right.left = TreeNode(6)\nHead.right.right = TreeNode(20)\n\n\n# Head.left.left = TreeNode(4)\n# Head.left.right = TreeNode(5)\n\n# Head.left.left.left = TreeNode(8)\n# Head.left.left.right = TreeNode(9)\n\n\n# Head.left.right.left = TreeNode(9)\n# Head.left.right.right = TreeNode(8)\n\n# Head.right.left = TreeNode(5)\n# Head.right.right = TreeNode(4)\n# Head.right.right = TreeNode(7)\n\n\ntestClass = Solution()\nprint(testClass.isValidBST(Head))\n","sub_path":"98-validate-binary-search-tree/arrayOrder.py","file_name":"arrayOrder.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"199757152","text":"import mcturtle.minecraftturtle as minecraftturtle\nimport mcturtle.minecraft as minecraft\nimport mcturtle.block as block\n\n\nmc = minecraft.Minecraft.create()\npos = mc.player.getPos()\nsteve = minecraftturtle.MinecraftTurtle(mc,pos)\n\nsteve.penblock(block.WOOL.id, 1)\nsteve.speed(10)\n\nsteve.penup()\nsteve.up(90)\nsteve.forward(50)\nsteve.down(90)\nsteve.pendown()\n\n\n\ndef circle():\n for i in range(60):\n steve.down(6)\n steve.forward(2)\n\nfor i in range(30):\n circle()\n steve.right(6)\n steve.penblock(block.MELON.id, 0)\n circle()\n steve.right(6)\n steve.penblock(block.TNT.id, 0)\n","sub_path":"bauble.py","file_name":"bauble.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"407878534","text":"import requests\nfrom requests.exceptions import ConnectionError\n\nbase_headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36',\n 'Accept-Encoding': 'gzip, deflate, sdch',\n 'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7'\n}\n\n\ndef get_page(url, options={}):\n \"\"\"\n Crawl a proxy page\n :param url:\n :param options:\n :return:\n \"\"\"\n headers = dict(base_headers, **options) # dict() creates a new dictionary, e.g. dict(a='a', b='b', t='t') -> {'a': 'a', 'b': 'b', 't': 't'}\n print('Crawling', url)\n try:\n response = requests.get(url, headers=headers)\n print('Crawled OK', url, response.status_code)\n if response.status_code == 200:\n return response.text\n 
except ConnectionError:\n print('Crawl failed', url)\n return None\n\nif __name__ == '__main__':\n get_page('http://www.baidu.com')","sub_path":"学习文档/ProxyPool/proxypool/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"257368820","text":"from django import template\nfrom random import randint\nfrom minerals.models import Mineral\n\nregister = template.Library()\n\n\n@register.filter\ndef replace_underscores_with_space(key):\n return ' '.join(key.split('_'))\n\n\n@register.simple_tag\ndef generate_random_num():\n mineral_count = Mineral.objects.all().count()\n return randint(1, mineral_count)\n\n\n@register.filter\ndef get_dict_value(obj, key):\n try:\n if obj[key] != '':\n return obj[key]\n else:\n return '-'\n except KeyError:\n return ''\n","sub_path":"mineral_catalog/minerals/templatetags/mineral_extras.py","file_name":"mineral_extras.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"244021225","text":"from Domain.Student import Student\nfrom Domain.Exceptions import StudentException\n\nclass StudentRepository:\n def __init__(self):\n '''\n creates an instance of the StudentRepository\n '''\n self.__data = []\n \n def __find(self, ID):\n '''\n returns the index of the Student having the given ID\n Input: ID - positive integer, the ID of the Student that is being searched for\n Output: index - if the Student was found, -1 - otherwise \n '''\n for i in range(len(self.__data)):\n if self.__data[i].getID() == ID:\n return i\n return -1\n \n def findByID(self, ID):\n '''\n returns the Student having the given ID\n Input: ID - positive integer, the ID of the Student that is being searched for\n Output: the Student, if found or None otherwise\n '''\n indexID = self.__find(ID) \n if indexID == -1:\n return None\n return self.__data[indexID]\n \n def add(self, stu):\n '''\n adds a Student to the repository\n Input: stu - object of type Student\n Output: the given Student is added to the repository, if no other Student with the same ID exists\n Exceptions: raises StudentException if another Student with the same ID already exists\n '''\n if self.findByID(stu.getID()) != None:\n raise StudentException(\"Student with ID \" + str(stu.getID()) + \" already exists!\")\n self.__data.append(stu)\n \n def update(self, ID, newName):\n '''\n updates a Student from the repository, using its ID\n Input: ID - positive integer, the ID of the Student that must be updated\n newName - string, updated name of the Student\n Output: if such a Student exists, it is updated\n Exceptions: raises StudentException if a Student with the given ID does not exist\n '''\n indexID = self.__find(ID)\n if indexID == -1:\n raise StudentException(\"There is no Student with ID \" + str(ID) + \"!\")\n self.__data[indexID].setName(newName)\n \n def remove(self, ID):\n '''\n removes a Student from the repository, using its ID\n Input: ID - positive integer, the ID of the Student that must be removed\n Output: if such a Student exists, it is removed and returned\n Exceptions: raises StudentException if a Student with the given ID does not exist\n '''\n indexID = self.__find(ID)\n if indexID == -1:\n raise StudentException(\"There is no Student with ID \" + str(ID) + \"!\")\n self.__data.pop(indexID)\n \n def __len__(self):\n '''\n returns the size of the list of students\n '''\n return len(self.__data)\n \n def getAll(self):\n 
'''\n returns the list of students\n '''\n return self.__data\n\ndef testStudentRepository():\n repo = StudentRepository()\n \n s1 = Student(1, \"Vasilica\")\n s2 = Student(1, \"Gheorghidiu\")\n \n assert len(repo) == 0\n \n repo.add(s1)\n assert len(repo) == 1\n assert repo.findByID(1) == s1\n \n try:\n repo.add(s1)\n assert False\n except StudentException:\n assert True\n \n try:\n repo.add(s2)\n assert False\n except StudentException:\n assert True\n \n s2 = Student(2, \"Gheorghidiu\")\n repo.add(s2)\n assert len(repo) == 2\n assert repo.findByID(1) == s1\n assert repo.findByID(2) == s2\n \n repo.update(2,\"Johnny Bravo\")\n \n assert len(repo) == 2\n repo.remove(1)\n assert len(repo) == 1\n assert repo.findByID(2) == s2\n assert repo.findByID(1) == None\n \n try:\n repo.remove(1)\n assert False\n except StudentException:\n assert True\n \n repo.remove(2)\n assert len(repo) == 0\n \nif __name__ == '__main__':\n testStudentRepository()\n","sub_path":"Repository/StudentRepository.py","file_name":"StudentRepository.py","file_ext":"py","file_size_in_byte":3922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"34750285","text":"\n\n#calss header\nclass _MANDATE():\n\tdef __init__(self,): \n\t\tself.name = \"MANDATE\"\n\t\tself.definitions = [u'the authority given to an elected group of people, such as a government, to perform an action or govern a country: ', u'the name of an area of land that has been given to a country by the UN, following or as part of a peace agreement']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_mandate.py","file_name":"_mandate.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"352617770","text":"import matplotlib as mplt\nmplt.use('Agg')\n\nfrom matplotlib import rc\n\ndefault_linewidth = 2.0;\ndefault_ticksize = 10.0;\n\nmplt.rcParams['lines.linewidth'] = default_linewidth;\nmplt.rcParams['axes.linewidth'] = default_linewidth;\nmplt.rcParams['xtick.major.size'] = default_ticksize;\nmplt.rcParams['xtick.major.width'] = default_linewidth;\nmplt.rcParams['ytick.major.size'] = default_ticksize;\nmplt.rcParams['ytick.major.width'] = default_linewidth;\n\n#rc('font', **{'family':'sans-serif', 'serif': 'Bitstream Vera Serif', 'sans-serif': 'MS Reference Sans Serif', 'size': 20.0});\nrc('font', **{'size': 20.0});\nrc('axes', **{'labelsize': 20.0});\nrc('mathtext', **{'fontset':'stixsans'});\nrc(('xtick.major','ytick.major'), pad=20)\n\n#import matplotlib.font_manager as fm;\n#print(\"%s: %d\"%(fm.FontProperties().get_name(),fm.FontProperties().get_weight()));\n\nimport matplotlib.pyplot as plt\n\nimport sys, argparse\nfrom netCDF4 import Dataset\nimport numpy as np\nfrom pprint import pprint\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--data-dir')\nparser.add_argument('--ref-data-dir')\nparser.add_argument('--data-file')\n\nparser.add_argument('--casenames')\nparser.add_argument('--ref-casenames')\nparser.add_argument('--legends')\n\nparser.add_argument('--domain-file')\nparser.add_argument('--indexing', default=\":\")\nparser.add_argument('--varname-mean')\nparser.add_argument('--varname-var')\nparser.add_argument('--display-varname')\nparser.add_argument('--yscale', type=float, 
default=1.0)\nparser.add_argument('--ylabel', default=\"\")\nparser.add_argument('--extra-title', default=\"\")\nparser.add_argument('--extra-filename', default=\"\")\nparser.add_argument('--colors')\nparser.add_argument('--linestyles', type=str)\nparser.add_argument('--y-offset', type=float, default=0.0)\nparser.add_argument('--ymax-mean', type=float)\nparser.add_argument('--ymax-std', type=float)\nparser.add_argument('--tick-levs-mean', type=float)\nparser.add_argument('--tick-levs-std', type=float)\nparser.add_argument('--figsize', default=\"8,6\")  # width,height in inches; referenced below\nparser.add_argument('--output-dir', default=\".\")  # referenced below when saving the figure\n\nargs = parser.parse_args()\n\npprint(args)\n\ncasenames = args.casenames.split(\",\")\nref_casenames = args.ref_casenames.split(\",\")\nlegends = args.legends.split(\",\")\ncolors = args.colors.split(\",\")\nlinestyles = args.linestyles.split(\",\")\nindexing = args.indexing.split(\",\")\nfigsize = tuple(map(float, args.figsize.split(',')))\nN_cases = len(casenames)\nscale = args.yscale\n\nindexes = []\nprint(\"Constructing indexing\")\nfor i, content in enumerate(indexing):\n if content == \":\":\n indexes.append(slice(None))\n else:\n indexes.append(int(content))\n\nindexes = tuple(indexes)\nprint(\"Indices: \", indexes) \n \nprint(\"Going to compare these models:\")\npprint(casenames)\n\ndatas1 = []\ndatas2 = []\n\nfor i in range(N_cases):\n\n print(\"Loading case: %s\" % casenames[i]) \n print(\" case: %s\" % ref_casenames[i]) \n f1 = Dataset(\"%s/%s/%s\" % (args.data_dir, casenames[i], args.data_file), \"r\")\n f2 = Dataset(\"%s/%s/%s\" % (args.ref_data_dir, ref_casenames[i], args.data_file), \"r\")\n \n datas1.append([f1.variables[args.varname_mean][indexes] / scale, f1.variables[args.varname_var][indexes] / scale])\n datas2.append([f2.variables[args.varname_mean][indexes] / scale, f2.variables[args.varname_var][indexes] / scale])\n\n f1.close()\n f2.close()\n\n\nf = Dataset(args.domain_file, \"r\")\nlat = f.variables[\"yc\"][:, 1]\nf.close()\n\nfig, (ax1, ax2) = plt.subplots(2, 1, figsize=figsize, sharex=True)\n\nax1.set_title(\"Mean of %s%s\" % (args.display_varname, args.extra_title))\nax2.set_xlabel(\"Latitude [deg]\")\nax2.set_xticks([-90, -60, -30, 0, 30, 60, 90])\n\nax1.set_ylabel(args.ylabel)\nax2.set_ylabel(args.ylabel)\n\n\nfor i in range(N_cases):\n _mean = datas1[i][0] - datas2[i][0]\n _std = datas1[i][1] - datas2[i][1]\n ax1.plot(lat, _mean, linewidth=2, label=legends[i], color=colors[i], linestyle=linestyles[i])\n ax2.plot(lat, _std, linewidth=2, label=legends[i], color=colors[i], linestyle=linestyles[i])\n\nax1.legend(loc='center left', bbox_to_anchor=(1, 0.5), frameon=False)\nfig.subplots_adjust(right=0.7, bottom=0.2)\n\n\nax1.grid(True)\nax2.grid(True)\n\nax1.set_ylim(np.array([-1.0, 1.0]) * args.ymax_mean)\nax2.set_ylim(np.array([ 0.0, 1.0]) * args.ymax_std)\n\nfig.savefig(\"%s/diffcase_meridional_mean_std_%s_%s%s.png\" % (args.output_dir, args.varname_mean, args.varname_var, args.extra_filename), dpi=200)\n\n#plt.show()\n","sub_path":"other_src/diagnose_scripts/plot/plot_diffcase_meridional_mean_std.py","file_name":"plot_diffcase_meridional_mean_std.py","file_ext":"py","file_size_in_byte":4245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"244014496","text":"import skimage\nfrom skimage import io\nimport numpy as np\nimport random\nimport os\nimport matplotlib.pyplot as plt\nfrom PIL import Image, ImageEnhance, ImageChops\nimport cv2\n# root_path is the image root directory, img_name is the image file name\n\nimport pprint\n#import this to use flags\nimport tensorflow as tf\n\nflags = tf.app.flags\nflags.DEFINE_string(\"images_dir\", \"./images\", \"Directory name of the original images that need 
preprocessing.\")\nflags.DEFINE_string(\"img_save_path\", \"./images_by_processing\", \"Directory name to save the processing images.\")\nflags.DEFINE_multi_integer(\"translation\", None, \"Translate image.\")\nflags.DEFINE_bool(\"flip\", False, \"Flip image.\")\nflags.DEFINE_bool(\"contrast\", False, \"Contrast image.\")\nflags.DEFINE_integer(\"rotation\", None, \"Rotation image.\")\nflags.DEFINE_bool(\"G_noise\", False, \"Add Gaussian noise followed the Standard normal distribution into image.\")\nflags.DEFINE_bool(\"Color_dithering\", False, \"Add color dithering into image.\")\n\nFLAGS = flags.FLAGS\npp = pprint.PrettyPrinter()\n\ndef translation(img_path, img_name, off = 0): #平移\n img = Image.open(img_path)\n if len(FLAGS.translation) == 2:\n img_offset = ImageChops.offset(img, FLAGS.translation[0], FLAGS.translation[1])\n elif len(FLAGS.translation) == 2:\n img_offset = ImageChops.offset(img, FLAGS.translation[0], 0)\n else:\n print('flags --translation should not be specify more than three times.')\n\n img_offset.save(os.path.join(FLAGS.img_save_path, img_name + '_offset.jpg'))\n #img_offset.save(os.path.join(img_path.split('.')[0] + '_offset.jpg'))\n #return offset\n\ndef flip(img_path, img_name): #翻转图像\n img = Image.open(img_path)\n flip_img = img.transpose(Image.FLIP_LEFT_RIGHT)\n flip_img.save(os.path.join(FLAGS.img_save_path, img_name + '_flip.jpg'))\n #flip_img.save(os.path.join(img_name.split('.')[0] + '_flip.jpg'))\n #return flip_img\n\ndef aj_contrast(img_path, img_name): #调整对比度 两种方式 gamma/log\n image = io.imread(img_path)\n gam= skimage.exposure.adjust_gamma(image, 0.5)\n io.imsave(os.path.join(FLAGS.img_save_path, img_name + '_gam.jpg'),gam)\n #skimage.io.imsave(os.path.join(img_path.split('.')[0] + '_gam.jpg'),gam)\n log= skimage.exposure.adjust_log(image)\n io.imsave(os.path.join(FLAGS.img_save_path, img_name + '_log.jpg'),gam)\n #skimage.io.imsave(os.path.join(img_path.split('.')[0] + '_log.jpg'),log)\n #return gam,log\ndef rotation(img_path, img_name, angle):\n img = Image.open(img_path)\n rotation_img = img.rotate(angle) #旋转角度\n rotation_img.save(os.path.join(FLAGS.img_save_path, img_name + '_rotation.jpg'))\n #rotation_img.save(os.path.join(img_path.split('.')[0] + '_rotation.jpg'))\n #return rotation_img\n\ndef randomGaussian(img_path, img_name, mean = 0, sigma = 1): #高斯噪声\n image = Image.open(img_path)\n im = np.array(image)\n #设定高斯函数的偏移\n means = 0\n #设定高斯函数的标准差\n sigma = 25\n #r通道\n r = im[:,:,0].flatten()\n\n #g通道\n g = im[:,:,1].flatten()\n\n #b通道\n b = im[:,:,2].flatten()\n\n #计算新的像素值\n for i in range(im.shape[0]*im.shape[1]):\n\n pr = int(r[i]) + random.gauss(0,sigma)\n\n pg = int(g[i]) + random.gauss(0,sigma)\n\n pb = int(b[i]) + random.gauss(0,sigma)\n\n if(pr < 0):\n pr = 0\n if(pr > 255):\n pr = 255\n if(pg < 0):\n pg = 0\n if(pg > 255):\n pg = 255\n if(pb < 0):\n pb = 0\n if(pb > 255):\n pb = 255\n r[i] = pr\n g[i] = pg\n b[i] = pb\n im[:,:,0] = r.reshape([im.shape[0],im.shape[1]])\n\n im[:,:,1] = g.reshape([im.shape[0],im.shape[1]])\n\n im[:,:,2] = b.reshape([im.shape[0],im.shape[1]])\n gaussian_image = gaussian_image = Image.fromarray(np.uint8(im))\n gaussian_image.save(os.path.join(FLAGS.img_save_path, img_name + '_gaussian.jpg'))\n #gaussian_img.save(os.path.join(img_path.split('.')[0] + '_gaussian.jpg'))\n #return gaussian_image\ndef randomColor(img_path, img_name): #随机颜色\n \"\"\"\n 对图像进行颜色抖动\n :param image: PIL的图像image\n :return: 有颜色色差的图像image\n \"\"\"\n image = Image.open(os.path.join(img_path))\n random_factor = np.random.randint(0, 31) / 10. 
# random factor\n color_image = ImageEnhance.Color(image).enhance(random_factor) # adjust saturation\n random_factor = np.random.randint(10, 21) / 10. # random factor\n brightness_image = ImageEnhance.Brightness(color_image).enhance(random_factor) # adjust brightness\n random_factor = np.random.randint(10, 21) / 10. # random factor\n contrast_image = ImageEnhance.Contrast(brightness_image).enhance(random_factor) # adjust contrast\n random_factor = np.random.randint(0, 31) / 10. # random factor\n dithering_img = ImageEnhance.Sharpness(contrast_image).enhance(random_factor) # adjust sharpness\n dithering_img.save(os.path.join(FLAGS.img_save_path, img_name + '_dithering.jpg'))\n #dithering_img.save(os.path.join(img_path.split('.')[0] + '_dithering.jpg'))\n #return ImageEnhance.Sharpness(contrast_image).enhance(random_factor) # adjust sharpness\n\ndef main(_):\n pp.pprint(FLAGS.__flags)\n\n print('You should specify the operation you want to process images, otherwise, nothing will be done.\n')\n print('You can use --translation=offset to translate images along the x axis.\n \\\n use --translation=offset twice to translate images along both the x and y axes. \n \\\n --flip to flip images\n \\\n --contrast to adjust the contrast of images\n \\\n --rotation=angle to rotate images by the given angle \n \\\n --G_noise to add Gaussian noise into images\n \\\n --Color_dithering to add color dithering into images\n')\n if not os.path.exists(FLAGS.images_dir):\n print('Please specify the directory of the images, you can use --images_dir')\n return\n if not os.path.exists(FLAGS.img_save_path):\n os.makedirs(FLAGS.img_save_path)\n \n \n list = os.listdir(FLAGS.images_dir) # list all files and directories under the folder\n for i in range(0,len(list)):\n image_path = os.path.join(FLAGS.images_dir,list[i])\n try:\n im = Image.open(image_path)\n except:\n print('Open %s error!\n'%(image_path))\n continue\n if FLAGS.translation:\n print('FLAG translation is specified, %s will be translated!->Store in %s\n'%(image_path, FLAGS.img_save_path))\n translation(image_path, list[i], FLAGS.translation)\n if FLAGS.flip:\n print('FLAG flip is specified, %s will be flipped!->Store in %s\n'%(image_path, FLAGS.img_save_path))\n flip(image_path, list[i])\n if FLAGS.contrast:\n print('FLAG contrast is specified, %s will be contrasted!->Store in %s\n'%(image_path, FLAGS.img_save_path))\n aj_contrast(image_path, list[i])\n if FLAGS.rotation:\n print('FLAG rotation is specified, %s will be rotated!->Store in %s\n'%(image_path, FLAGS.img_save_path))\n rotation(image_path, list[i], FLAGS.rotation)\n if FLAGS.G_noise:\n print('FLAG G_noise is specified, %s will have Gaussian noise added!->Store in %s\n'%(image_path, FLAGS.img_save_path))\n randomGaussian(image_path, list[i])\n if FLAGS.Color_dithering:\n print('FLAG Color_dithering is specified, %s will have color dithering added!->Store in %s\n'%(image_path, FLAGS.img_save_path))\n randomColor(image_path, list[i])\n\nif __name__ == '__main__':\n tf.app.run()\n","sub_path":"Batch_data_expasion.py","file_name":"Batch_data_expasion.py","file_ext":"py","file_size_in_byte":7720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"401106329","text":"from model import *\nimport os\n\nmodel = firenet(224, 224, training=False)\nmodel.load(os.path.join(\"models/FireNet\", \"firenet\"),weights_only=True)\n\n# network input sizes\nrows = 224\ncols = 224\n\nframe = cv2.imread(\"images/protest.jpg\")\n# re-size image to network input size and perform prediction\nsmall_frame = 
# re-size image to network input size and perform prediction\nsmall_frame = cv2.resize(frame, (rows, cols), interpolation=cv2.INTER_AREA)\noutput = model.predict([small_frame])[0][0]\n\nif output > 0.5:\n print(\"fire!\", output)\nelse:\n print(\"no fire\", output)\n","sub_path":"infer.py","file_name":"infer.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"449671798","text":"import numpy as np\nfrom PIL import Image\nfrom scipy.signal import fftconvolve as convolve\n\n\n\ndef center_and_rotate(array, center, angle):\n \"\"\"\n Translate the input array so that the pixel indicated by 'center' coincides with the center of the output array\n and rotate by a given angle around that pixel.\n \n * Output center:\n Let 'size' be the number of pixels along one axis. In zero-based indexing, the index of the output 'center pixel' along that axis is defined as\n (size + 1) // 2,\n '//' is the integer division operator.\n * Output shape:\n same as array.shape\n * Interpolation for rotation:\n nearest\n * padding:\n input is assumed to be surrounded by pixels of constant value zero\n \n array - input array, first dimension is y axis, second is x axis\n center - center position as pixel index pair, first is y index, second is x index\n angle - rotation angle in radians, positive angles refer to (the usual) counterclockwise rotation right-handed x,y axes\n \"\"\"\n sizeY, sizeX = array.shape\n centerY, centerX = center\n \n padXLow = sizeX - centerX - 1\n padXHigh = centerX\n padYLow = sizeY - centerY - 1\n padYHigh = centerY\n padding = [[padYLow, padYHigh], [padXLow, padXHigh]]\n padded = np.pad(array, padding, 'constant', constant_values=0)\n \n rotated = np.array(Image.fromarray(padded).rotate(-angle*180/np.pi, expand=False))\n \n cropXLow = (padXLow + padXHigh)//2\n cropXHigh = padXLow + padXHigh - cropXLow\n cropYLow = (padYLow + padYHigh)//2\n cropYHigh = padYLow + padYHigh - cropYLow\n \n cropped = rotated[cropYLow:rotated.shape[0]-cropYHigh, cropXLow:rotated.shape[1]-cropXHigh]\n return cropped\n\n\n\ndef central_similar_portion_xyslice(array, factor):\n \"\"\"\n Return slice tuple for the central section of array.\n \n array - the array to slice\n factor - relative part that is kept of each axis\n \"\"\"\n sizeY = array.shape[0]\n sizeX = array.shape[1]\n \n startY = int((sizeY*(1-factor))//2) \n stopY = sizeY - startY\n startX = int((sizeX*(1-factor))//2) \n stopX = sizeX - startX\n \n sx = slice(startX, stopX)\n sy = slice(startY, stopY)\n \n return (sy, sx)\n\n\n\ndef central_xyslice(array, ssize):\n \"\"\"\n Return slice tuple for the central section of array.\n \n array - the array to slice\n ssize - numbers of pixels in x and y direction as (ny, nx)\n \"\"\"\n sizeY = array.shape[0]\n sizeX = array.shape[1]\n \n ny = ssize[0]\n nx = ssize[1]\n \n startX = (sizeX - nx)//2\n stopX = startX + nx\n startY = (sizeY - ny)//2\n stopY = startY + ny\n \n sx = slice(startX, stopX)\n sy = slice(startY, stopY)\n \n return (sy, sx)\n\n\n\ndef circular_increase_mask(mask, diameter):\n \"\"\"\n Increase mask by pushing its edge out.\n \n Outward pushing is done in a circular manner.\n True or positive values are considered as inside, False or zero values are considered as outside.\n \n mask - mask array to increase\n diameter - number of pixels to push the mask's edge\n \"\"\"\n def circle(diameter, a):\n \"\"\"\n Create circular mask.\n \n diameter - circle's diameter as number of pixels\n a - parameter for pixel discretisation\n \"\"\"\n assert(type(diameter)==int)\n\n # a = np.exp(-1e-16) 
- 1./np.sqrt(2)\n radius = diameter/2\n\n #x = np.linspace(-radius+0.5, radius-0.5, diameter, endpoint=True)\n x = np.linspace(-radius-0.5, radius+0.5, diameter+2, endpoint=True)\n y = x\n xx, yy = np.meshgrid(x, y)\n circle = np.array(((xx**2+yy**2)<=((radius+a)**2)), dtype=int)\n\n return circle\n \n n = np.sum(mask)\n c = circle(diameter, 0.)\n convolved = convolve(mask, c, mode='same')\n r = np.logical_not(np.isclose(convolved, np.zeros(shape=convolved.shape), rtol=0.1/n))\n return np.array(r, dtype=int)\n\n\n\ndef calc_angle(start, end):\n \"\"\"\n Calculate the angle in radians between a line segment and the x axis.\n \n start - coordinates of line segment start point as (y, x)\n end - coordinates of line segment end point as (y, x)\n \"\"\"\n startY, startX = start\n endY, endX = end\n distanceX = endX - startX\n distanceY = endY - startY\n angle = np.arctan2(distanceY, distanceX)\n return angle\n","sub_path":"src/generator/fowgas/packages/mysticetus/imaging.py","file_name":"imaging.py","file_ext":"py","file_size_in_byte":4545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"180564319","text":"def convert_decimal_to_binary(num):\n binary = bin(num).replace(\"0b\", \"\")\n return binary\n\n\ndef is_palindrome(text):\n backward_str = \"\"\n for i in range(len(text)):\n backward_str = backward_str + text[len(text) - (1 + i)]\n\n return backward_str.upper() == text.upper()\n\ndef palindromic_decimal_binary(limit):\n palindromes = []\n for i in range(limit):\n if is_palindrome(str(i)):\n binary = convert_decimal_to_binary(i)\n if is_palindrome(str(binary)):\n palindromes.append(i)\n\n return palindromes\n\n\ndef sum_list(nums):\n total = 0\n for i in range(len(nums)):\n total = total + nums[i]\n return total\n\n\npalindromic_dec_bin = palindromic_decimal_binary(1000000)\nprint(palindromic_dec_bin)\nprint(len(palindromic_dec_bin))\n\nsum_palindromes = sum_list(palindromic_dec_bin)\nprint(sum_palindromes)\n","sub_path":"euler/problem36.py","file_name":"problem36.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"132109518","text":"import json\n\ndb = {}\nlabel = {\"a\": \"address\", \"p\": \"phone\", \"s\": \"sex\"}\n\n\n# convert the dict to JSON and save it\ndef toJson(db: dict):\n    json_str = json.dumps(db, indent=4)\n    with open('db.json', 'w') as json_file:\n        json_file.write(json_str)\n\n\n# read the JSON file back into a dict\ndef toDict():\n    with open('db.json', 'r') as f:\n        return json.load(fp=f)\n\n\n# add an employee\ndef addEmp(db: dict):\n    while True:\n        uname = input(\"Enter the name of the employee to add: \")\n        if uname.lower() == \"e\" or uname == \"\": break\n        if uname in db:\n            print(\"This employee already exists\\n\")\n            print(\"=======================================\")\n            continue\n        else:\n            empInfo = input(\"Enter the employee's info (phone,address,sex): \")\n            listInfo = empInfo.split(\",\")\n            db[uname] = {\"phone\": listInfo[0], \"address\": listInfo[1], \"sex\": listInfo[2]}\n            toJson(db)\n            break\n\n\n# query an employee\ndef queryEmp(db: dict):\n    while True:\n        uname = input(\"Enter the name of the employee to look up: \")\n        if uname.lower() == \"e\" or uname == \"\": break\n        if uname not in db:\n            print(f\"No information found for employee '{uname}'\\n\")\n            print(\"=======================================\")\n            continue\n        else:\n            print(f\"Information for employee '{uname}':\\n\")\n            print(f\"Phone: {db[uname]['phone']}\\n\")\n            print(f\"Address: {db[uname]['address']}\\n\")\n            print(f\"Sex: {db[uname]['sex']}\\n\")\n            print()\n\n\n# update an employee's info\ndef updateEmp(db: dict):\n    while True:\n        uname = input(\"Enter the name of the employee to update: \\n\")\n        if uname.lower() == \"e\" or uname == \"\": break\n        if uname not in db:\n            print(f\"Employee '{uname}' does not exist\\n\")\n            continue\n        else:\n            print(f\"Information for employee '{uname}':\\n\")\n            print(f\"Phone: {db[uname]['phone']}\\n\")\n            print(f\"Address: {db[uname]['address']}\\n\")\n            print(f\"Sex: {db[uname]['sex']}\\n\")\n            empInfo = input(\"Enter the updated info (phone,address,sex): \")\n            listInfo = empInfo.split(\",\")\n            db[uname] = {\"phone\": listInfo[0], \"address\": listInfo[1], \"sex\": listInfo[2]}\n            toJson(db)\n            break\n\n\n# delete an employee\ndef delEmp(db: dict):\n    while True:\n        uname = input(\"Enter the name of the employee to delete: \\n\")\n        if uname.lower() == \"e\" or uname == \"\": break\n        if uname not in db:\n            print(f\"Employee '{uname}' does not exist\\n\")\n            continue\n        else:\n            del db[uname]\n            toJson(db)\n\n\nif __name__ == '__main__':\n    while True:\n        db = toDict()\n        print(db)\n        option = input(\"Choose an operation\\n\"\n                       \"A: add an employee\\n\"\n                       \"B: query an employee\\n\"\n                       \"C: update an employee's info\\n\"\n                       \"D: delete an employee\\n\"\n                       \"E: quit\\n\")\n        if option.lower() == \"a\":\n            addEmp(db)\n            print(db)\n            print(\"=======================================\")\n        elif option.lower() == \"b\":\n            queryEmp(db)\n            print(\"=======================================\")\n        elif option.lower() == \"c\":\n            updateEmp(db)\n            print(\"=======================================\")\n        elif option.lower() == \"d\":\n            delEmp(db)\n            print(\"=======================================\")\n        elif option.lower() == \"e\" or option == \"\":\n            print(\"Thanks for using the program\\n\")\n            print(\"=======================================\")\n            break\n","sub_path":"PythonDemo/Day04/作业.py","file_name":"作业.py","file_ext":"py","file_size_in_byte":3733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"267681742","text":"\n# coding: utf-8\n\n# In[5]:\n\n\nfrom datetime import datetime\nimport xml.etree.ElementTree\n\n\n# In[11]:\n\n\nlog=open(\"C:/Users/Shuting Zhao/Desktop/Archive/python/csr log.xml\").read()\ne = xml.etree.ElementTree.parse(\"C:/Users/Shuting Zhao/Desktop/Archive/python/csr log.xml\").getroot()\nec=e[0]\ncube=ec\n\n\n# In[7]:\n\n\ndef getlist(ec):\n l=[]\n for child in ec:\n print(child.tag,child.attrib,child.text)\ngetlist(ec[11][15][11][0])\n\n\n# In[97]:\n\n\ndef subset(ec):\n l=[]\n for child in ec:\n l.append(child.tag)\n return(l)\n\ndef xmlexplorer(ec,l=[],string=\"\",t=\"\"):\n if \"Title\" in subset(ec):\n if ec[0].text==\"Building\" or ec[0].text==\"Finalizing Build\" or ec[0].text==\"Initializing\":\n l.append(string+\"|\" + ec[2].text )\n xmlexplorer(ec[11],l,string,ec[2].text)\n elif ec[0].text==\"Successfully built\" or ec[0].text==\"Finalization Completed\" or ec[0].text==\"Initialization completed\":\n l[len(l)-1]=l[len(l)-1]+\"|\" + ec[3].text+\"|\"+str((datetime.strptime(ec[3].text, '%m/%d/%Y %I:%M:%S %p')-datetime.strptime(t, '%m/%d/%Y %I:%M:%S %p')).total_seconds())\n ##else: \n ##xmlexplorer(ec[11],l,string)\n else:\n for child in ec:\n xmlexplorer(child,l,string,t)\n return(l)\n\n\n# In[120]:\n\n\nlen(l)\n\n\n# In[119]:\n\n\nl=[]\nj=0\nfor i in range(0,len(cube[11])):\n \n if cube[11][i].attrib[\"CloudColumn\"]!=\"\":\n col=\"custom\"+\"|\"+cube[11][i].attrib[\"CloudColumn\"]\n j=1\n elif j>0 and cube[11][i][0].text==\"Building\":\n col=\"custom\"+\"|\"+\"table\"\n elif cube[11][i][0].text==\"Building\" :\n col=\"raw\"+\"|\"+\"table\"\n else:\n col=cube[11][i][0].text+\"|null\"\n xmlexplorer(cube[11][i],l,str(i)+\"|\"+col+\"|\"+cube[11][i].attrib[\"CloudTable\"])\nl\n\n\n# In[38]:\n\n\nt1=\"2/14/2018 5:25:54 AM\"\ndatetime.strptime(t1, '%m/%d/%Y %I:%M:%S %p')\n\n\n# In[117]:\n\n\nlen(cube[11])\n\n\n# In[118]:\n\n\n
l=[]\nj=0\nfor i in range(2,78):\n \n if cube[11][i].attrib[\"CloudColumn\"]!=\"\":\n col=\"custom\"+\"|\"+cube[11][i].attrib[\"CloudColumn\"]\n j=1\n elif j>0:\n col=\"custom\"+\"|\"+\"null\"\n else:\n col=\"raw\"+\"|\"+\"null\"\n xmlexplorer(cube[11][i],l,str(i)+\"|\"+col+\"|\"+cube[11][i].attrib[\"CloudTable\"])\nl\n\n\n# In[85]:\n\n\nj\n\n","sub_path":"XML.py","file_name":"XML.py","file_ext":"py","file_size_in_byte":2225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"282303647","text":"import os\nimport sys\n\nfrom setuptools import setup, find_packages\n\nassert sys.version_info >= (3, 6, 0), \"stellar-sdk v2 requires Python 3.6+\"\n\nhere = os.path.abspath(os.path.dirname(__file__))\nrequirements_file = \"requirements.txt\"\n\nabout = {}\nwith open(\n os.path.join(here, \"stellar_sdk\", \"__version__.py\"), mode=\"r\", encoding=\"utf-8\"\n) as f:\n exec(f.read(), about)\n\nwith open(\"README.rst\", mode=\"r\", encoding=\"utf-8\") as f:\n readme = f.read()\n\nsetup(\n name=about[\"__title__\"],\n version=about[\"__version__\"],\n description=about[\"__description__\"],\n long_description=readme,\n author=about[\"__author__\"],\n author_email=about[\"__author_email__\"],\n url=about[\"__url__\"],\n license=about[\"__license__\"],\n keywords=[\n \"stellar-sdk\",\n \"stellar.org\",\n \"lumens\",\n \"xlm\",\n \"blockchain\",\n \"distributed exchange\",\n \"cryptocurrency\",\n \"dex\",\n \"stellar-core\",\n \"horizon\",\n \"sdex\",\n \"trading\",\n ],\n project_urls={\n \"Documentation\": \"https://stellar-sdk.readthedocs.org\",\n \"Code\": \"https://github.com/StellarCN/py-stellar-base\",\n \"Issue tracker\": \"https://github.com/StellarCN/py-stellar-base/issues\",\n },\n include_package_data=True,\n install_requires=open(requirements_file).readlines(),\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n python_requires=\">=3.6.0\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"179846391","text":"import time\nimport pandas as pd\nfrom wikiapi import WikiApi\nwiki = WikiApi()\nimport re\nfrom gensim.models import Word2Vec\n#import word2vec\nfrom scipy.spatial.distance import cosine\nimport numpy as np\nimport heapq\n\nstart = time.time()\nmodel = Word2Vec.load_word2vec_format('/Users/liamconnell/Downloads/GoogleNews-vectors-negative300.bin', binary = True)\nlap1 = time.time()\nprint('data gathered: %s' % (lap1 - start))\n\n\n\ndef get_longword(s):\n    return heapq.nlargest(2, re.split(' ', s), key=len) # the two longest words of the string\n\n#not used\n#def get_wiki(k):\n # try:\n # return wiki.get_article(wiki.find(k)[0]).content\n # except:\n # return []\n\ndef get_wiki(q):\n
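\t# fetch the Wikipedia article for the longest word of the question,\n\t# falling back to the second-longest word if the first lookup fails\n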
\ttry:\n\t\treturn wiki.get_article(wiki.find(get_longword(q)[0])[0]).content\n\texcept:\n\t\ttry:\n\t\t\treturn wiki.get_article(wiki.find(get_longword(q)[1])[0]).content\n\t\texcept:\n\t\t\tprint('neither works')\n\t\t\treturn []\n\n\n\nindex2word_set = set(model.index2word)\nnum_features = 300\n\n\n\n
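# Each question/answer string is embedded as the element-wise mean of the word2vec\n# vectors of its in-vocabulary words; get_winner_from_avecs then picks the answer\n# whose mean vector has the smallest cosine distance to the question vector.\n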
def get_avg_vec(words):\n\twords = re.split(' ', words)\n\tnwords = 0\n\tfeatureVec = np.zeros((num_features,),dtype=\"float32\")\n\tfor word in words:\n\t\tif word in index2word_set:\n\t\t\tnwords = nwords + 1.\n\t\t\tfeatureVec = np.add(featureVec,model[word])\n\tfeatureVec = np.divide(featureVec,nwords)\n\treturn featureVec\n\ndef get_winner_from_avecs(row):\n\ttry:\t\n\t\tdists = []\n\t\tfor col in row['answerA':'answerD']:\n\t\t#TODO: if 'not' in words: 1-avec\n\t\t\tavec = get_avg_vec(col)\n\t\t\t#if 'not' in re.split(' ',col):\n\t\t\t#\tavec = 1-avec\n\t\t\t#\tprint('NOT')\n\t\t\t#why nan errors?\n\t\t\tif sum(np.isfinite(avec)) < 300:\n\t\t\t\tprint('avec')\n\t\t\t\tprint(avec)\n\t\t\tif sum(np.isfinite(row['qvec'])) < 300:\n\t\t\t\tprint('qvec')\n\t\t\t\tprint(row['qvec'])\n\t\t\tdist = cosine(avec, row['qvec'])\n\t\t\tdists.append(dist)\n\t\tm = min(dists)\n\t\tbest = [i for i, j in enumerate(dists) if j == m]\n\t\tif best == [0]:\n\t\t\treturn 'A'\n\t\tif best == [1]:\n\t\t\treturn 'B'\n\t\tif best == [2]:\n\t\t\treturn 'C'\n\t\tif best == [3]:\n\t\t\treturn 'D'\n\t\telse:\n\t\t\tprint('returning C because no best cosine')\n\t\t\tprint(len(best))\n\t\t\treturn 'C'\n\texcept:\n\t\t#print('some error, returning C')\n\t\treturn 'C'\n\t\t\n\n\n\n\ndef overlap(answw, words):\n count = 0\n for word in re.split(' ', answw):\n if word in words:\n count = count+1\n return count\n\ndef compete(row):\n lis = []\n #print(row)\n for col in row['answerA':'answerD']:\n lis.append(overlap(col, row['words']))\n return lis\n\n\ndef answerit(lis):\n #print(lis)\n m = max(lis)\n return [i for i, j in enumerate(lis) if j == m]\n\n\ndef convert(g):\n if len(g) == 1:\n if g == [0]:\n return 'A'\n if g == [1]:\n return 'B'\n if g == [2]:\n return 'C'\n if g == [3]:\n return 'D'\n else:\n return 'C'\n\n\nccount = 0\nstart = time.time()\n#data = pd.read_csv('../input/training_set.tsv', '\\t')\n#data = pd.read_csv('../input/validation_set.tsv', '\\t')\ndata = pd.read_csv('../input/validation_set_mod2.csv')\nlap1 = time.time()\nprint('data gathered: %s' % (lap1 - start))\n\n\n#data['keyword'] =data['question'].apply(get_longword)\nlap2 = time.time()\n#print('longword: %d' % (lap2 - lap1))\n\n#data['words'] = data['question'].apply(get_wiki)\nlap3 = time.time()\n#print('get wiki: %d' % (lap3 - lap2))\n\n#save dataset with wiki\n#data.to_csv('../input/validation_set_mod2.csv')\n\ndata['qvec'] = data.words.apply(get_avg_vec)\nlapa = time.time()\nccount = 0\nprint('get qvec: %d' % (lapa - lap3))\nprint(ccount)\ndata['closest_avec'] = data.apply(get_winner_from_avecs, axis=1)\n\n#data['comp'] = data.apply(compete, axis = 1)\n#lap4 = time.time()\n#print('comp: %d' % (lap4 - lap3))\n#data['guess'] = data.comp.apply(answerit)\n#lap5 = time.time()\n#print('guess: %d' % (lap5 - lap4))\n#data['sub'] = data.guess.apply(convert)\n#lap6 = time.time()\nlapb = time.time()\nprint('get wiki: %d' % (lapb - lapa))\n\n\nsample = pd.read_csv('../input/sample_submission.csv')\nsample['correctAnswer'] = data['closest_avec']\nlap7 = time.time()\nprint('read and sub: %d' % (lap7 - lapb))\n#sub = open('../output/submission.csv', 'w')\nsample.to_csv('../output/w2v_avg22.csv', index=False)\nlap8 = time.time()\nprint('write csv: %d' % (lap8 - lap7))\n\nprint('ccount')\nprint(ccount)\n","sub_path":"word2vec1.py","file_name":"word2vec1.py","file_ext":"py","file_size_in_byte":4137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"188787470","text":"from login.forms import *\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import logout\nfrom django.views.decorators.csrf import csrf_protect\nfrom django.shortcuts import render_to_response\nfrom django.http import HttpResponseRedirect\nfrom django.template import RequestContext\nimport logging\n\n\n# Get an instance of a logger\nlogger = logging.getLogger(__name__)\n\n@csrf_protect\ndef register(request):\n if request.method == 'POST':\n form = RegistrationForm(request.POST)\n if form.is_valid():\n try:\n user = User.objects.create_user(\n first_name = form.cleaned_data['first_name'],\n last_name = form.cleaned_data['last_name'],\n username=form.cleaned_data['username'],\n password=form.cleaned_data['password1'],\n email=form.cleaned_data['email']\n )\n logger.info(\"Created User with username: [%s] and email: [%s]\", form.cleaned_data['username'], form.cleaned_data['email'])\n return HttpResponseRedirect('/register/success/#clients')\n except Exception as e:\n logger.error(\"Error in creating user with username: [%s] Error: %s\", form.cleaned_data['username'], e)\n return render_to_response(\n 'registration/register.html'\n )\n else:\n form = RegistrationForm()\n variables = RequestContext(request, {\n 'form': form\n })\n\n return render_to_response(\n 'registration/register.html',\n variables,\n )\n\ndef register_success(request):\n return render_to_response(\n 'registration/success.html',\n )\n\ndef logout_page(request):\n logout(request)\n return HttpResponseRedirect('/')\n\n@login_required\ndef home(request):\n return render_to_response(\n 'home.html',\n { 'user': request.user }\n )","sub_path":"login/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"617259743","text":"# coding: utf-8\nimport sys, os\nsys.path.append(os.pardir) # make files in the parent directory importable\nimport numpy as np\nfrom common.functions import softmax, cross_entropy_error\nfrom common.gradient import numerical_gradient\n\n\nclass simpleNet:\n def __init__(self):\n self.W = np.random.randn(2,3) # initialize with a standard normal distribution\n\n def predict(self, x):\n return np.dot(x, self.W)\n\n def loss(self, x, t):\n z = self.predict(x)\n y = softmax(z)\n loss = cross_entropy_error(y, t)\n\n return loss\n\nx = np.array([0.6, 0.9])\nt = np.array([0, 0, 1])\n\nnet = simpleNet()\n\nf = lambda w: net.loss(x, t)\ndW = numerical_gradient(f, net.W)\n\nprint(dW)\n","sub_path":"ch04/gradient_simplenet.py","file_name":"gradient_simplenet.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"44201101","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\n\nimport os\nimport subprocess\nimport sys\n\n\n_root_dir = os.path.dirname(os.path.abspath(__file__))\n_out_dir = os.path.join(_root_dir, 'out')\n_download_dir = os.path.join(_out_dir, 'download')\n_url_root = 'https://bintool.s3.amazonaws.com/'\n\n\ndef main():\n _make_dir(_download_dir)\n gcc_archive_path = _download('gcc-4.7.2.tar.bz2',\n 'cc308a0891e778cfda7a151ab8a6e762')\n _extract(gcc_archive_path)\n\n\ndef _curl(source_url, target_path):\n arguments = (\n 'curl',\n 
'--output',\n target_path,\n source_url\n )\n\n subprocess.check_call(arguments)\n\n\ndef _download(filename, signature):\n target_path = os.path.join(_download_dir, filename)\n\n if not os.path.exists(target_path):\n source_url = _url_root + filename\n print('Downloading ' + filename)\n _curl(source_url, target_path)\n\n actual_signature = _md5(target_path)\n\n if actual_signature != signature:\n os.remove(target_path)\n sys.exit('Bad signature: ' + filename)\n\n return target_path\n\n\ndef _extract(archive_path):\n extract_dir = _get_extract_dir(archive_path)\n extract_path = os.path.join(_download_dir, extract_dir)\n\n if not os.path.exists(extract_path):\n arguments = (\n 'tar',\n '-x', # Extract\n '-f', # File\n archive_path\n )\n\n print('Extracting ' + extract_dir)\n subprocess.check_call(arguments, cwd=os.path.dirname(extract_path))\n\n return extract_path\n\n\ndef _get_extract_dir(archive_path):\n arguments = (\n 'tar',\n '-t', # List\n '-f', # File\n archive_path\n )\n\n process = subprocess.Popen(arguments, stdout=subprocess.PIPE)\n extract_dir = process.stdout.readline().strip('/\\n')\n process.terminate()\n\n return extract_dir\n\n\ndef _make_dir(dir):\n if not os.path.exists(dir):\n os.makedirs(dir)\n\n\ndef _md5(path):\n arguments = (\n 'md5',\n '-q', # Quiet\n path\n )\n\n return subprocess.check_output(arguments).strip()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"126102489","text":"import networkx as nx\n\n\nclass MealyMachine:\n _filename = None\n _nx = None\n _s0 = None\n _graph = None\n\n def __init__(self, fname):\n self._filename = fname\n self._nx = nx.drawing.nx_pydot.read_dot(fname)\n self._s0 = [_nod for _nod in self._nx.neighbors('__start0')][0]\n self._graph = dict()\n\n for _i in self._nx.adj:\n if _i == '__start0': continue\n self._graph[_i] = dict()\n for _j in self._nx.adj[_i]:\n for _label in self._nx.adj[_i][_j]:\n _in, _out = str(self._nx.adj[_i][_j][_label]['label']).replace('\\\"', '').split(' / ')\n self._graph[_i][_in] = [_out, _j]\n\n def get_states(self):\n return [n for n in self._graph.keys()]\n\n def next_state(self, i, label):\n return self._graph[i][label]\n\n def list_destination(self, inputs):\n curr = self._s0\n states = []\n for an_in in inputs:\n if an_in == 'Reset':\n curr = self._s0\n else:\n curr = self._graph[curr][an_in][1]\n states.append(curr)\n return states\n\n def list_origin(self, inputs):\n curr = self._s0\n states = []\n for an_in in inputs:\n states.append(curr)\n if an_in == 'Reset':\n curr = self._s0\n else:\n curr = self._graph[curr][an_in][1]\n\n return states\n\n\n def list_outputs(self, inputs):\n curr = self._s0\n outputs = []\n for an_in in inputs:\n if an_in == 'Reset':\n outputs.append(\"Reset\")\n curr = self._s0\n else:\n outputs.append(self._graph[curr][an_in][0])\n curr = self._graph[curr][an_in][1]\n\n return outputs\n\n# mm = MealyMachine(\"learnedModel.dot\")\n#\n# print(mm.list_origin([\"ApplicationDataEmpty\"]))\n# print([\"ApplicationDataEmpty\"])\n# print(mm.list_outputs([\"ApplicationDataEmpty\"]))\n# print(mm.list_destination([\"ApplicationDataEmpty\"]))","sub_path":"timedIO/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"464347258","text":"#!/usr/bin/env python3\n\n\"\"\"\nCreated on 10 
Feb 2021\n\n@author: Bruno Beloff (bruno.beloff@southcoastscience.com)\n\nsource repo: scs_analysis\n\nDESCRIPTION\nThe sample_distance utility is used to find the distance (in kilometres, to the nearest metre) between a given position,\nand the position in each of the input GPS JSON documents. A command flag specifies the path to the node within the\ndocument that is to be examined.\n\nThe quality of the GPS fix may be taken into account: if a quality is specified, then any GPS fix with a quality\n(rounded to the nearest integer) below that level is reported as a null distance.\n\nA simple, spherical model of the earth is used.\n\nSYNOPSIS\nsample_distance.py -p LAT LNG [-i ISO] [-q QUALITY] [-v] GPS_PATH\n\nEXAMPLES\ncsv_reader.py -v scs-ph1-10-status-H2-15min.csv | \\\nsample_distance.py -v -p 51.4889752 -0.4418752 -q 1 val.gps | \\\ncsv_writer.py -v scs-ph1-10-distance-H2-15min.csv\n\nDOCUMENT EXAMPLE - INPUT\n{\"rec\": \"2020-12-09T21:00:00Z\", \"val\": {\"gps\": {\"pos\": [51.48877673, -0.44155907], \"elv\": 33.3, \"qual\": 1.0},\n\"tag\": \"scs-ph1-10\"}\n\nDOCUMENT EXAMPLE - OUTPUT\n{\"rec\": \"2020-12-09T21:00:00Z\", \"gps\": {\"pos\": [51.48877673, -0.44155907], \"elv\": 33.3, \"qual\": 1}, \"dist\": 0.031}\n\nRESOURCES\nGetting distance between two points based on latitude/longitude\nhttps://stackoverflow.com/questions/19412462/getting-distance-between-two-points-based-on-latitude-longitude\n\"\"\"\n\nimport sys\n\nfrom scs_analysis.cmd.cmd_sample_distance import CmdSampleDistance\n\nfrom scs_core.data.json import JSONify\nfrom scs_core.data.path_dict import PathDict\n\nfrom scs_core.position.gps_datum import GPSDatum\nfrom scs_core.position.position import Position\n\nfrom scs_core.sys.logging import Logging\n\n\n# --------------------------------------------------------------------------------------------------------------------\n\nif __name__ == '__main__':\n\n document_count = 0\n processed_count = 0\n\n # ----------------------------------------------------------------------------------------------------------------\n # cmd...\n\n cmd = CmdSampleDistance()\n\n if not cmd.is_valid():\n cmd.print_help(sys.stderr)\n exit(2)\n\n Logging.config('sample_distance', verbose=cmd.verbose)\n logger = Logging.getLogger()\n\n logger.info(cmd)\n\n try:\n # ------------------------------------------------------------------------------------------------------------\n # resources...\n\n origin = Position(cmd.lat, cmd.lng)\n\n\n # ------------------------------------------------------------------------------------------------------------\n # run...\n\n min_datum = None\n\n for line in sys.stdin:\n datum = PathDict.construct_from_jstr(line)\n\n if datum is None:\n continue\n\n document_count += 1\n\n if not datum.has_sub_path(cmd.iso):\n logger.error(\"ISO node '%s' not present: %s\" % (cmd.iso, line.strip()))\n exit(1)\n\n if not datum.has_sub_path(cmd.path):\n logger.error(\"GPS node '%s' not present: %s\" % (cmd.path, line.strip()))\n exit(1)\n\n gps_node = datum.node(cmd.path)\n\n try:\n gps = GPSDatum.construct_from_jdict(gps_node)\n distance = gps.distance(origin, minimum_acceptable_quality=cmd.quality)\n\n except TypeError:\n gps = None\n distance = None\n\n report = PathDict()\n report.append('rec', datum.node(cmd.iso))\n report.append('gps', gps)\n report.append('dist', distance)\n\n print(JSONify.dumps(report))\n\n processed_count += 1\n\n\n # ----------------------------------------------------------------------------------------------------------------\n # end...\n\n except 
KeyboardInterrupt:\n print(file=sys.stderr)\n\n finally:\n logger.info(\"documents: %d processed: %d\" % (document_count, processed_count))\n","sub_path":"src/scs_analysis/sample_distance.py","file_name":"sample_distance.py","file_ext":"py","file_size_in_byte":3970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"426624960","text":"from time import sleep as sp\nfrom __dados__ import Dados as dd\nfrom os import system as st\n\n\nclass Ostra(object):\n st('cls')\n def __init__(self, temperatura):\n self.temperatura = temperatura\n self.contador = 0\n self.minute = 1\n\n def __not__(self):\n \"\"\" method to receive notifications from the oyster \"\"\"\n while True:\n print(f'The temperature of {self.temperatura} degrees Celsius is under stress')\n sp(self.minute)\n self.contador += 1\n self.temperatura += 1\n # st('cls')\n if self.temperatura >= 31:\n print(f'\\033[31mWhat a pity, the oyster is dead. \\nIt reached a temperature of {self.temperatura} degrees Celsius\\033[m')\n break\n \n\nif __name__ == '__main__':\n ostra = Ostra(20)\n ostra.__not__()\n","sub_path":"projeto.py","file_name":"projeto.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"410129498","text":"import re\nimport logging\nimport traceback\n\nfrom twisted.internet import reactor\nfrom twisted.web.client import Agent, RedirectAgent, HTTPConnectionPool\nfrom twisted.web.http_headers import Headers\nfrom twisted.internet.protocol import Protocol\n\nlogger = logging.getLogger(__name__)\n\nEVENT_SOURCE_CONNECTING = 0\nEVENT_SOURCE_OPEN = 1\nEVENT_SOURCE_CLOSED = 2\n\nKEY_VALUE_LINE_MATCHER = re.compile('(.*?): ?(.*)')\n\n\nclass MessageEvent:\n def __init__(self, message_type, message_data):\n self.type = message_type\n self.data = message_data\n\n\nclass LineBuffer:\n def __init__(self):\n self.last_char_was_cr = False\n self.string_data = ''\n\n def reset(self):\n self.last_char_was_cr = False\n self.string_data = ''\n pass\n\n def add(self, data, line_callback):\n \"\"\"\n :type data: str\n \"\"\"\n for char in data:\n\n if char == '\\n':\n line_callback(self.string_data)\n self.reset()\n else:\n if self.last_char_was_cr:\n line_callback(self.string_data)\n self.reset()\n\n if char == '\\r':\n self.last_char_was_cr = True\n else:\n self.string_data += char\n\n\nclass StreamHandler(Protocol):\n def __init__(self, _event_source):\n self.event_source = _event_source\n self.line_buffer = LineBuffer()\n self.event_type = None\n self.event_data = None\n\n def _line_handler(self, line):\n match_result = KEY_VALUE_LINE_MATCHER.match(line)\n\n if match_result is not None:\n key = match_result.group(1)\n value = match_result.group(2)\n if key == 'event':\n self.event_type = value\n elif key == 'data':\n self.event_data = value\n elif line == '':\n # dispatch the event\n event = MessageEvent(self.event_type, self.event_data)\n\n # noinspection PyBroadException\n try:\n self.event_source.onmessage(event)\n except Exception:\n logger.error('error in onmessage: ' + traceback.format_exc())\n finally:\n self.event_type = None\n self.event_data = None\n else:\n logger.warn('unhandled event line: ' + line)\n\n def close(self):\n if self.transport is not None:\n self.transport.stopProducing()\n\n def dataReceived(self, data):\n \"\"\"\n http://www.w3.org/TR/eventsource/#event-stream-interpretation\n\n :type data: str\n \"\"\"\n logger.debug('data received: ' + str(data).strip())\n\n 
self.event_source.ready_state = EVENT_SOURCE_OPEN\n\n if self.event_source.onmessage is not None:\n self.line_buffer.add(data, self._line_handler)\n\n # noinspection PyMethodOverriding\n def connectionLost(self, reason):\n logger.info('connection lost: ' + reason.getErrorMessage())\n self.event_source.ready_state = EVENT_SOURCE_CLOSED\n\n\nclass EventSource:\n def __init__(self, _reactor):\n self.reactor = _reactor\n self.ready_state = EVENT_SOURCE_CLOSED\n self.request_future = None\n self.stream_handler = None\n\n self.onopen = None\n self.onmessage = None\n self.onerror = None\n\n self.headers = Headers({'Accept': ['text/event-stream']})\n\n pool = HTTPConnectionPool(self.reactor, persistent=False)\n self.agent = RedirectAgent(Agent(self.reactor, pool=pool))\n\n def add_raw_header(self, name, value):\n self.headers.addRawHeader(name, value)\n\n def open(self, url):\n if self.ready_state is not EVENT_SOURCE_CLOSED:\n self.close()\n\n self.ready_state = EVENT_SOURCE_CONNECTING\n\n logger.info('opening connection to ' + url)\n\n self.request_future = self.agent.request(\n 'GET',\n url,\n self.headers,\n None)\n\n self.request_future.addCallback(self._request_callback)\n self.request_future.addErrback(self._request_err_callback)\n\n def close(self):\n if self.request_future is not None:\n self.request_future.cancel()\n\n if self.stream_handler is not None:\n self.stream_handler.close()\n\n def _request_callback(self, response):\n self.stream_handler = StreamHandler(self)\n response.deliverBody(self.stream_handler)\n\n def _request_err_callback(self, result):\n logger.debug('request error: ' + str(result))\n\n if self.onerror is not None:\n self.onerror(result)\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.DEBUG)\n\n event_source = EventSource(reactor)\n\n event_source.onmessage = lambda data: logger.info('onmessage type: ' + data.type + ' data: ' + data.data)\n\n event_source.open('https://automatar.firebaseio.com/test.json')\n\n # noinspection PyUnresolvedReferences\n reactor.run()\n","sub_path":"server_sent_events.py","file_name":"server_sent_events.py","file_ext":"py","file_size_in_byte":4985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"9970949","text":"import datetime\nimport os\nfrom json import dumps\nfrom random import choice, sample\nfrom uuid import uuid4\n\nimport pytest\nfrom settings.exceptions import RESTConnectionError\nfrom steps.cdb import select_cdb_document\nfrom steps.object import correspond_selected_object, get_item_from\nfrom steps.rest.interface import OnlineInterface\nfrom steps.rest_deprecated.get import request_item_get\nfrom steps.rest_deprecated.post import request_item_post\nfrom steps.waitings import waiting_tasks_processing\nfrom utils.getter import gen_couchdb_id, get_time_in_format\n\n\n# @pytest.mark.usefixtures(\"reset_layer\")\n@pytest.mark.debug_case_xer\n@pytest.allure.feature('DEBUG Feature')\n@pytest.allure.story('00. 
Standalone test for debugging')\n# @pytest.mark.xfail()\ndef test_case1(rest_api, psql):\n \"\"\"\n Creating (updating, deleting) a warehouse, a preparation place, and a sale place\n :param rest_api:\n :param psql:\n :return:\n \"\"\"\n\n print(dumps(\n rest_api.select_objects('report.datasources.payment_types', params={\n 'custom_params': (\n ('vid', 2),\n ('mode', 'previous7Days'),\n ('groupField[]', ''),\n ('groupDir[]', ''),\n ('filterField[]', 'operationType'),\n ('filterOperator[]', 'contains'),\n ('filterValue[]', 'FISCAL'),\n ('filterField[]', 'operationType'),\n ('filterOperator[]', 'contains'),\n ('filterValue[]', 'NONFISCAL'),\n ('timeZone', -300),\n )\n }),\n indent=4,\n ensure_ascii=False,\n sort_keys=True\n ))\n pass\n\n\n@pytest.mark.usefixtures(\"reset_layer\")\n@pytest.mark.debug_case_ws_listener\ndef test_ribbons(context, rest, psql, ws_listener):\n store_list = request_item_get(context, rest, 'warehouse.store').get('ds')\n\n store_1 = get_item_from(store_list, 'single', 'table', {\n 'title': 'Склад 1'\n })\n # get the list of measure units\n measure_list = request_item_get(context, rest, 'core.dictionaries.measureunits').get('ds')\n\n measure_kg = get_item_from(measure_list, 'single', 'table', {\n 'title': 'кг'\n })\n\n provider_1 = request_item_post(context, rest, 'warehouse.providers', 'Organization',\n 'create', {\n 'shortName': 'Шаражкина Конторка №%s' % gen_couchdb_id(),\n })\n with 
pytest.allure.step('Выбираем \"Блюдо 1\" и добавляем ему запись в технологическую карту (\"Товар 1\", 2 шт)'):\n request_item_post(context, rest, 'warehouse.nomenclature.routecard', 'item', 'create', {\n 'cardProduct': singleproduct_1,\n 'cardProductEffectiveAmount': 3,\n 'cardProductQty': 3,\n 'finalWeight': 3,\n 'grossWeightBaseMeasureUnit': 3,\n 'grossWeightKg': 3,\n 'netWeight': 3,\n }, owner_object=dish_1, object_version=1)\n\n dish_list = request_item_get(context, rest, 'warehouse.nomenclature.dish')['ds']\n\n ws_listener.send_reprocess_action()\n waiting_tasks_processing(psql)\n\n correspond_selected_object(dish_list, {'id': dish_1['id']},\n {'storeQuantity': 0,\n 'currentPrimeCost': 500})\n\n\n@pytest.mark.test_rest_api\n@pytest.mark.usefixtures('reset_layer')\ndef test_class_rest(rest_api, franchise, psql):\n group_b = rest_api.send_create(\n 'warehouse.nomenclature.dish',\n 'group',\n {\n 'name': 'Группа №%s' % gen_couchdb_id(),\n 'designator': 'dish',\n 'routeWithdrawType': 'BYROUTING',\n }\n )\n rest_api.send_create(\n 'warehouse.nomenclature.dish',\n 'item',\n {\n 'name': 'Блюдо №%s' % gen_couchdb_id(),\n 'designator': 'dish',\n 'routeWithdrawType': 'BYROUTING',\n },\n params={\n 'parent_object': group_b\n }\n )\n\n group = rest_api(franchise.rest(1)).send_create(\n 'warehouse.nomenclature.dish',\n 'group',\n {\n 'name': 'Группа №%s' % gen_couchdb_id(),\n 'designator': 'dish',\n 'routeWithdrawType': 'BYROUTING',\n }\n )\n dish = rest_api(franchise.rest(1)).send_create(\n 'warehouse.nomenclature.dish',\n 'item',\n {\n 'name': 'Блюдо №%s' % gen_couchdb_id(),\n 'designator': 'dish',\n 'routeWithdrawType': 'BYROUTING',\n },\n params={\n 'parent_object': group\n }\n )\n\n dish.update(\n {\n 'name': 'Человечина %s' % gen_couchdb_id()\n }\n )\n\n dish = rest_api(franchise.rest(1)).send_update(\n 'warehouse.nomenclature.dish',\n dish\n )\n dish1 = rest_api(franchise.rest(1)).send_create(\n 'warehouse.nomenclature.dish',\n 'item',\n {\n 'name': 'Блюдо тех №%s' % gen_couchdb_id(),\n 'designator': 'dish',\n 'routeWithdrawType': 'BYROUTING',\n },\n )\n\n rest_api(franchise.rest(1)).send_create(\n 'warehouse.nomenclature.routecard',\n 'item',\n {\n 'cardProduct': dish1,\n 'cardProductEffectiveAmount': 3,\n 'cardProductQty': 3,\n 'finalWeight': 3,\n 'grossWeightBaseMeasureUnit': 3,\n 'grossWeightKg': 3,\n 'netWeight': 3,\n },\n params={\n 'owner_object': dish\n }\n )\n rest_api.send_create(\n 'warehouse.nomenclature.dish',\n 'item',\n {\n 'name': 'Блюдо last №%s' % gen_couchdb_id(),\n 'designator': 'dish',\n 'routeWithdrawType': 'BYROUTING',\n },\n params={\n 'parent_object': group_b\n }\n )\n\n psql.execute('SELECT name, tenant_id FROM storeitem')\n print(\n '\\n'.join(\n map(\n lambda v: str(v),\n psql.fetchall()\n )\n )\n )\n\n\n@pytest.mark.parametrize(\"index\", range(int(os.getenv('COUNT_ITERATION', 1000))))\n@pytest.mark.test_terminal\n@pytest.mark.usefixtures('reset_layer')\ndef test_terminal_emulator(rest_api, terminal, psql, index):\n sale_place_list = rest_api.select_objects('warehouse.nomenclature.sale_place')['ds']\n sale_place_1 = get_item_from(sale_place_list, condition='random')\n terminal.new('Тестовый %s' % index, psql)\n terminal_back_list = rest_api.select_objects('front.terminals.ipad')['ds']\n terminal_back_1 = get_item_from(terminal_back_list, table={'deviceId': terminal.id})\n\n rest_api.send_action(\n 'front.terminals.ipad',\n payload_table={\n 'actionName': \"activate\",\n 'data': {\n 'className': 'terminalModal',\n 'name': 'Пробник 1',\n 'salePlace': 
sale_place_1,\n 'terminalType': 'QUICK_POS',\n 'timeZone': -300\n },\n 'ids': [\n terminal_back_1['id']\n ]\n }\n )\n\n waiting_tasks_processing(psql)\n terminal.state = 'active'\n\n print(terminal.id, terminal.state, terminal.terminal_command, index)\n\n\n@pytest.mark.gen_60_dish\n# @pytest.mark.usefixtures('reset_layer')\ndef test_generate_dish(rest_api):\n for i in range(3):\n try:\n dish = rest_api.send_create(\n 'warehouse.nomenclature.dish',\n 'item',\n {\n 'name': 'Блюдо №%s_%s' % (i, uuid4()),\n 'designator': 'dish',\n 'routeWithdrawType': 'BYROUTING'\n },\n )\n print('Создано блюдо %s, c id %s' % (dish['name'], dish['id']))\n rest_api.connect.cookies.clear()\n except RESTConnectionError:\n print('ERROR!!')\n\n\n@pytest.mark.widget_select_debug\ndef test_widget_select(rest_api):\n widget = rest_api.select_objects('dashboard.widget.dishtopbyperiod')\n rest_api.connect.cookies.clear()\n widget = rest_api.select_objects('dashboard.widget.dishtopbyperiod')\n print('ololo')\n\n\n@pytest.mark.timezones_debug\ndef test_tz_debug(rest_api):\n # Время с фронта за нужный период timeZome: 360\n etalon = 1482170340000\n etalon1 = 1484765940000\n rest_api.gmt = -6\n time1 = get_time_in_format('invoicedate',\n set_date=datetime.date(2016, 12, 19),\n set_time=datetime.time(second=0, hour=11, minute=59),\n gmt=rest_api.gmt\n )\n time2 = get_time_in_format('invoicedate',\n set_date=datetime.date(2017, 1, 18),\n set_time=datetime.time(second=0, hour=12, minute=59),\n gmt=rest_api.gmt\n )\n\n time_minus_30_day = get_time_in_format('invoicedate',\n offset_min=-24 * 30 * 60,\n set_time=datetime.time(second=0, hour=12, minute=0),\n gmt=rest_api.gmt\n )\n time_plus_2_days = get_time_in_format('invoicedate',\n offset_min=24 * 2 * 60,\n set_time=datetime.time(second=0, hour=12, minute=0),\n gmt=rest_api.gmt\n )\n\n etalon_end = 1484895600000\n\n assert not any([round((etalon - time1) / 1000), round((etalon1 - time2) / 1000)])\n\n\n@pytest.mark.interface_operation_debug\ndef test_interface_operation_debug(couchdb):\n online_interface = select_cdb_document(couchdb, 'online_interface-')[0]\n interface = OnlineInterface(online_interface)\n\n result = interface.post('operationHistory', {\n 'customerToken': {\"key\": \"QWER\", \"type\": \"card\", \"entry\": \"barCode\"},\n 'accountType': {\n 'accountGuid': 'bonus_account_type-1'\n }\n })\n\n\n@pytest.mark.credit_hold_debug\ndef test_interface_credit_debug(couchdb):\n online_interface = select_cdb_document(couchdb, 'online_interface-')[0]\n interface = OnlineInterface(online_interface)\n\n for i in range(700):\n debit_time = datetime.datetime.now().replace(microsecond=0) - datetime.timedelta(hours=i * 5)\n print(\"I Запрос %s\" % i)\n interface.post('creditHold', {\n \"date\": debit_time.strftime('%Y-%m-%dT%H:%M:%S'),\n \"customerToken\": {\"type\": \"phone\", \"entry\": \"manual\", \"key\": \"9008001050\"},\n \"accountType\": {\"accountGuid\": \"bonus_account_type-1\"},\n \"amount\": i + 1\n })\n for i in range(700):\n debit_time = datetime.datetime.now().replace(microsecond=0) - datetime.timedelta(hours=i * 5)\n print(\"II Запрос %s\" % i)\n interface.post('creditHold', {\n \"date\": debit_time.strftime('%Y-%m-%dT%H:%M:%S'),\n \"customerToken\": {\"type\": \"phone\", \"entry\": \"manual\", \"key\": \"9008001050\"},\n \"accountType\": {\"accountGuid\": \"bonus_account_type-2\"},\n \"amount\": i + 1\n })\n\n\n@pytest.mark.terminal_with_online_interface\ndef test_terminal_with_online_interface(terminal):\n column = ['sale_id', 'cook_id', 'price']\n\n def 
get_dict_with_couchdb(cdb_document):\n return (\n dict(\n zip(column, (v, *sorted(t.values(), key=lambda x: str(x), reverse=1))),\n product_id=cdb_document['_id'],\n name=cdb_document['name'],\n parent=cdb_document.get('parentCategoryDocId'),\n )\n for v, t in cdb_document['saleScheme'].items()\n ) if cdb_document['saleScheme'] else []\n\n def create_order(product, guest):\n terminal.guests.default()\n terminal.guests.current_guest.auth_with_online_interface(guest['tokens'][0], terminal.id)\n\n terminal.guests.items = product\n terminal.precheck.create().save()\n # Перезопрашиваем список скидок, специально, что бы могли при отладке могли на горячую подхватывать н��вые скидки\n terminal.guests.apply_discounts(select_cdb_document(terminal.connect, 'crm_discount-'))\n\n terminal.order.create().time(offset_min=10)\n terminal.order.order_items = terminal.guests\n terminal.order.save()\n\n terminal.cdb_document.update(select_cdb_document(terminal.connect, 'terminal-')[0])\n\n terminal.online_interface = OnlineInterface(select_cdb_document(terminal.connect, 'online_interface-')[0])\n guests = terminal.online_interface.search('0', terminal.id)\n\n terminal.scheme_id = get_item_from(\n select_cdb_document(terminal.connect, 'tables_scheme-'),\n table={\n 'title': 'Заведение 1'\n }\n\n )['refId']\n terminal.table_id = get_item_from(\n select_cdb_document(terminal.connect, 'table-'),\n table={\n 'tablesSchemeDocId': terminal.scheme_id\n }\n )['_id']\n product_list = [\n item\n for v in select_cdb_document(terminal.connect, 'product-')\n for item in get_dict_with_couchdb(v)\n ]\n\n terminal.order.payments_params['list_type_pay'] = [\n v for v in select_cdb_document(terminal.connect, 'payment_type-')\n ]\n terminal.cashier = get_item_from(select_cdb_document(terminal.connect, 'user-'), condition='random')['_id']\n\n for _ in range(4):\n create_order(sample(product_list, 1), choice(guests))\n\n print()\n\n\n@pytest.mark.delete_shifts\ndef test_delete_shifts(rest_api):\n preorder = rest_api.select_objects('front.preorders')['ds']\n canc = rest_api.select_objects('front.cancellations')['ds']\n ench = rest_api.select_objects('front.encashment')['ds']\n\n tmp = rest_api.select_objects('front.zreport')['ds']\n print('Удаляем кассовые смены (%s)' % len(tmp))\n rest_api.send_remove('front.zreport', tmp)\n\n tmp = rest_api.select_objects('front.orders')['ds']\n print(' Ok\\nУдаляем чеки (%s)' % len(tmp))\n rest_api.send_remove('front.orders', tmp)\n\n print(' Ok\\nУдаляем пречеки (%s)' % len(preorder))\n rest_api.send_remove('front.preorders', preorder)\n\n print(' Ok\\nУдаляем отмены (%s)' % len(canc))\n rest_api.send_remove('front.cancellations', canc)\n\n print(' Ok\\nУдаляем инкассации (%s)' % len(ench))\n rest_api.send_remove('front.encashment', ench)\n print(' Ok')\n\n\n@pytest.mark.compare_json\ndef test_compare_json(couchdb):\n correspond_selected_object(\n select_cdb_document(couchdb, 'role-')[0],\n {\n \"className\": \"ru.edgex.platform.service.user.Role\",\n \"tokenOwner\": \"back\",\n \"disabled\": 'null',\n \"credentialsMap\": {\n 'FieldChildNone': None,\n 'FieldChildNull': 'null'\n },\n 'FieldNone': None,\n 'FieldNull': 'null',\n }\n )\n","sub_path":"scenarios/simple_test/test_debug_rest.py","file_name":"test_debug_rest.py","file_ext":"py","file_size_in_byte":19067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"200075735","text":"import subprocess\nimport os\nimport logging\nimport json\nimport time\n\ndef is_running_python3(script_name):\n 
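# scan the output of ps for a python3 process whose command line mentions script_name\n 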
proc_str = subprocess.check_output(['ps','-aef']).decode('utf-8')\n processes = proc_str.split('\\n')\n for proc in processes:\n if (\"python3\" in proc) and (script_name in proc):\n print(proc)\n return True\n return False\n\ndef check_python3(script_name,script_location = \"\",args = []):\n if script_location == \"\":\n rel = os.path.dirname(os.path.abspath(__file__))\n script_location = rel + \"/\" + script_name\n if not is_running_python3(script_name):\n logging.warning(script_name + \" isn't running. trying to start the process.\")\n try:\n p = subprocess.Popen([\"python3\",script_location] + args,\n cwd=\"/\",\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n time.sleep(2) \n if not is_running_python3(script_name):\n logging.warning(\"script not running after start attempt: \" + script_name)\n else:\n logging.info(\"success starting \" + script_name)\n except Exception as e:\n logging.warning(\"couldn't start \" + script_name + \" error: [\" + str(e) + \"]\")\n\n \ndef main():\n rel = os.path.dirname(os.path.abspath(__file__))\n logging.basicConfig(filename=rel + '/zimemr.log',level=logging.DEBUG,format='%(asctime)s %(message)s')\n scripts = json.load(open(rel + \"/running_scripts.json\",\"r\"))\n for script_name in scripts: \n check_python3(script_name)\n\nif __name__==\"__main__\":\n main()\n\n\n","sub_path":"health_check.py","file_name":"health_check.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"455168295","text":"#!/usr/bin/env python\n\nPACKAGE = 'amr_navigation'\nNODE = 'motion_controller'\n\nimport rospy\nimport tf\nimport math\n\nfrom actionlib.simple_action_server import SimpleActionServer\nfrom geometry_msgs.msg import Twist, Pose2D, PoseStamped\nfrom amr_msgs.msg import MoveToAction, MoveToActionGoal, MoveToResult, Obstacle\n\nfrom amr_navigation.velocity_controller import Velocity\nfrom amr_navigation.diff_velocity_controller import DiffVelocityController\nfrom amr_navigation.omni_velocity_controller import OmniVelocityController\n\n\nclass MotionControllerNode:\n \"\"\"\n This is a port of the AMR C++ MotionControllerNode\n \"\"\"\n\n CONTROLLER_TYPE_DIFF = 'diff'\n CONTROLLER_TYPE_OMNI = 'omni'\n CONTROLLER_TYPE_UNSPECIFIED = 'unsp'\n\n\n def __init__(self):\n \n rospy.init_node(NODE)\n \n \"\"\"\n Parameters\n \"\"\"\n max_linear_velocity = rospy.get_param('max_linear_velocity', 0.3)\n max_linear_acceleration = rospy.get_param('max_linear_acceleration', 0.05)\n linear_tolerance = rospy.get_param('linear_tolerance', 0.02)\n max_angular_velocity = rospy.get_param('max_angular_velocity', 0.2)\n max_angular_acceleration = rospy.get_param('max_angular_acceleration', 0.03)\n angular_tolerance = rospy.get_param('angular_tolerance', 0.02)\n \n abort_if_obstacle_detected = rospy.get_param('abort_if_obstacle_detected', True)\n self._controller_frequency = rospy.get_param('controller_frequency', 10.0)\n \n controller_type = rospy.get_param('~controller', self.CONTROLLER_TYPE_UNSPECIFIED)\n \n if controller_type == self.CONTROLLER_TYPE_DIFF:\n #Create diff controller\n self._velocity_controller = DiffVelocityController(max_linear_velocity,\n linear_tolerance,\n max_angular_velocity,\n angular_tolerance)\n elif controller_type == self.CONTROLLER_TYPE_OMNI:\n self._velocity_controller = OmniVelocityController(max_linear_velocity,\n linear_tolerance,\n max_angular_velocity,\n angular_tolerance)\n \"\"\"\n ========================= YOUR CODE HERE =========================\n\n 
Instructions: create an instance of OmniVelocityController. \n Hint: you may copy-paste from the DiffVelocityController case\n and adjust the arguments in the call to the constructor to\n conform to what you have implemented in that class.\n \n \"\"\"\n elif controller_type == self.CONTROLLER_TYPE_UNSPECIFIED:\n rospy.logerr('Controller type not specified. '\n 'Check the [controller] launch parameter')\n exit()\n else:\n #Unknown controller\n rospy.logerr('Requested controller type \"{0}\" unknown. '\n 'Check the [controller] launch parameter'.format(controller_type))\n exit()\n \n \"\"\"\n Publishers\n \"\"\"\n self._velocity_publisher = rospy.Publisher('/cmd_vel', Twist,\n queue_size=10)\n self._current_goal_publisher = rospy.Publisher(NODE+'/current_goal',\n PoseStamped,\n latch=True,\n queue_size=0)\n self._action_goal_publisher = rospy.Publisher(NODE+'/move_to/goal',\n MoveToActionGoal,\n queue_size=1)\n \n \"\"\"\n Subscribers\n \"\"\"\n self._simple_goal_subscriber = rospy.Subscriber(NODE+'/move_to_simple/goal',\n PoseStamped,\n self._simple_goal_callback,\n queue_size=1)\n if abort_if_obstacle_detected:\n self._obstacles_subscriber = rospy.Subscriber('obstacles',\n Obstacle,\n self._obstacles_callback,\n queue_size=100)\n \"\"\"\n Action server\n \"\"\"\n self._move_to_server = SimpleActionServer(NODE+'/move_to',\n MoveToAction,\n self._move_to_callback,\n auto_start=False)\n self._move_to_server.start()\n self._tf= tf.TransformListener()\n rospy.loginfo('Started [motion_controller] node.')\n\n\n def _move_to_callback(self, move_to_goal):\n \"\"\"\n Triggered with a request to move_to action server\n \"\"\"\n rospy.loginfo('Received [move_to] action command.')\n if not self._set_new_goal(move_to_goal):\n return\n else:\n rate = rospy.Rate(self._controller_frequency)\n while not rospy.is_shutdown():\n if not self._move_to_server.is_active():\n # Exit if the goal was aborted\n return\n if self._move_to_server.is_preempt_requested():\n # Process pending preemption requests\n rospy.loginfo('Action preemption requested.')\n if ( self._move_to_server.is_new_goal_available() and\n self._set_new_goal(self._move_to_server.accept_new_goal())\n ):\n # New goal already set\n pass\n else:\n # No new goals, preempt explicitly and exit the callback\n self._publish_zero_velocity()\n self._move_to_server.set_preempted()\n return\n if not self._move_towards_goal():\n # Finish execution if the goal was reached\n self._move_to_server.set_succeeded(MoveToResult(), 'Goal reached.')\n self._publish_zero_velocity()\n return\n rate.sleep()\n self._move_to_server.set_aborted(MoveToResult(), 'Aborted. The node has been killed.')\n\n\n def _simple_goal_callback(self, target_pose):\n \"\"\"\n Wrapper for simple goal action. Forwards as a request to the move_to\n action server. Has to be tested!\n \"\"\"\n rospy.loginfo('Received target pose through the \"simple goal\" topic.'\n 'Wrapping it in the action message and forwarding to the server.')\n rospy.logwarn('Simple goal control is yet to be tested!')\n action_goal = MoveToActionGoal()\n action_goal.header.stamp = rospy.Time.now()\n action_goal.goal.target_pose = target_pose\n self._action_goal_publisher.publish(action_goal)\n\n\n def _obstacles_callback(self, obstacle_msg):\n rospy.logwarn('An obstacle was detected. 
Will stop the robot and cancel the current action.')\n if self._move_to_server.is_active():\n self._move_to_server.set_aborted(MoveToResult(), 'Obstacle encountered, aborting...')\n self._publish_zero_velocity()\n\n\n def _move_towards_goal(self):\n try:\n time = self._tf.getLatestCommonTime(\"odom\", \"base_footprint\")\n position, quaternion = self._tf.lookupTransform(\"odom\", \"base_footprint\", time)\n except Exception as ex:\n rospy.logwarn('Transform lookup failed (\\odom -> \\base_footprint). '\n 'Reason: {0}.'.format(ex.message))\n return True\n current_pose = Pose2D()\n current_pose.x, current_pose.y = position[0], position[1]\n current_pose.theta = tf.transformations.euler_from_quaternion(quaternion)[2]\n velocity = self._velocity_controller.compute_velocity(current_pose)\n \n if self._velocity_controller.is_target_reached():\n rospy.loginfo('The goal was reached')\n return False\n else:\n self._publish_velocity(velocity)\n return True\n\n\n def _set_new_goal(self, new_goal):\n \"\"\"\n Set new target pose as given in the goal message.\n Checks if the orientation provided in the target pose is valid.\n Publishes the goal pose for the visualization purposes.\n Returns true if the goal was accepted.\n \"\"\"\n if not self._is_quaternion_valid(new_goal.target_pose.pose.orientation):\n rospy.logwarn('Aborted. Target pose has invalid quaternion.')\n self._move_to_server.set_aborted(MoveToResult(),'Aborted. Target pose has invalid quaternion.')\n return False\n else:\n x = new_goal.target_pose.pose.position.x\n y = new_goal.target_pose.pose.position.y\n yaw = tf.transformations.euler_from_quaternion([new_goal.target_pose.pose.orientation.x,\n new_goal.target_pose.pose.orientation.y,\n new_goal.target_pose.pose.orientation.z,\n new_goal.target_pose.pose.orientation.w])[2]\n pose = Pose2D(x, y, yaw)\n self._velocity_controller.set_target_pose(pose)\n self._current_goal_publisher.publish(new_goal.target_pose)\n rospy.loginfo('New target pose: {0}'.format(pose))\n return True\n\n\n def _publish_zero_velocity(self):\n self._publish_velocity(Velocity())\n\n\n def _publish_velocity(self, vel):\n self._velocity_publisher.publish(vel.get_twist())\n\n\n def _is_quaternion_valid(self, q):\n if any([math.isinf(a) or math.isnan(a) for a in [q.x, q.y, q.z, q.w]]):\n rospy.logwarn('Quaternion has NaN\\'s or infinities.')\n return False\n # TODO: check quaternion length and rotation test\n return True\n pass\n\n\nif __name__ == '__main__':\n w = MotionControllerNode()\n rospy.spin()\n\n","sub_path":"amr_navigation/nodes/motion_controller.py","file_name":"motion_controller.py","file_ext":"py","file_size_in_byte":10536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"222031168","text":"import os\nimport nltk\nimport spacy\nimport stanza\n\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import sent_tokenize, word_tokenize\nfrom nltk.parse.stanford import StanfordDependencyParser\nfrom nltk.chunk import ne_chunk\nfrom nltk.stem import PorterStemmer, WordNetLemmatizer\nfrom nltk.stem.snowball import SnowballStemmer\n\n\ndef nltk_processing(input_directory, output_directory):\n \"\"\"\n NLP Processing on non xml files using NLTK library\n \"\"\"\n os.environ['STANFORD_PARSER'] = '/home/rajpatel/Downloads/stanford-parser-full-2020-11-17/stanford-parser.jar'\n os.environ['STANFORD_MODELS'] = '/home/rajpatel/Downloads/stanford-parser-full-2020-11-17/stanford-parser-4.2.0-models.jar'\n\n path_to_jar = 
'/home/rajpatel/Downloads/stanford-parser-full-2020-11-17/stanford-parser.jar'\n path_to_models_jar = '/home/rajpatel/Downloads/stanford-parser-full-2020-11-17/stanford-parser-4.2.0-models.jar'\n dependency_parser = StanfordDependencyParser(path_to_jar=path_to_jar, path_to_models_jar=path_to_models_jar)\n\n stop_words = set(stopwords.words('english'))\n\n for filename in os.listdir(input_directory):\n with open(input_directory + '/' + filename, \"r\") as i:\n data = i.read()\n\n filename, ext = os.path.splitext(os.path.basename(filename))\n with open(output_directory + '/' + filename + \"_using_NLTK.txt\", \"w\") as f:\n data = data.replace(\"\\n\", ' ')\n data = data.replace(\"e.g.\", 'e.g.-')\n data = data.replace(\"eg.\", 'e.g.-')\n data = data.replace(\"e.g\", 'e.g.-')\n data = data.replace(\"i.e.\", 'i.e.-')\n\n sentences = sent_tokenize(data)\n\n sentence_count = 0\n organization_count, person_count, gpe_count, bigram_count, trigram_count, \\\n token_count_with_stopwords, token_count_without_stopwords, noun_phrase_count,\\\n porter_words_count, snowball_words_count, lemmatize_words_count = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0\n\n f.write(\"\\t\\t\\t\\t *** Text Processing using NLTK *** \\n\")\n for sentence in sentences:\n f.write(f'\\n\\n============================ Sentence {sentence_count + 1} =============================')\n f.write(\"\\n\\n%s \\n\" % sentence)\n\n # tokenize the sentences into words\n words = word_tokenize(sentence)\n token_count_with_stopwords = token_count_with_stopwords + len(words)\n\n # checking for stop words\n words = [word for word in words if not word in stop_words]\n token_count_without_stopwords = token_count_without_stopwords + len(words)\n f.write(\"\\n\\n>> Tokens are: \\n %s\" % words)\n\n # extracting bigrams\n bigram = nltk.bigrams(words)\n bigrams = list(bigram)\n bigram_count += len(bigrams)\n f.write(\"\\n\\n>> Bigrams are: \\n %s\" % bigrams)\n\n # extracting trigrams\n trigram = nltk.trigrams(words)\n trigrams = list(trigram)\n trigram_count += len(trigrams)\n f.write(\"\\n\\n>> Trigrams are: \\n %s\" % trigrams)\n\n # labelling each word with appropriate PoS tag\n tagged_words = nltk.pos_tag(words)\n f.write(\"\\n\\n>> POS Tags are: \\n %s\" % tagged_words)\n\n # dependency parsing\n try:\n result = dependency_parser.raw_parse(sentence)\n dep = result.__next__()\n f.write(\"\\n\\n>> Dependencies are: \\n %s \\n\" % list(dep.triples()))\n except Exception as e:\n print(\"\\n\\n\\n Exception occur in file '%s'. \\n\\n Error is ---> %s\" % (filename, e))\n\n # noun-phrase chunking\n pattern = 'NP: {
<DT>?<JJ>*(<NN>|<NNS>|<NNP>)+}'\n cp = nltk.RegexpParser(pattern)\n cs = cp.parse(tagged_words).subtrees()\n f.write(\"\\n\\n %s \\n\" % cp.parse(tagged_words))\n\n noun_phrases_list = [' '.join(leaf[0] for leaf in tree.leaves()) for tree in cs if tree.label() == 'NP']\n f.write(\"\\n\\n>> Noun Phrases are: \\n %s\" % noun_phrases_list)\n\n # named entity recognition\n ne_tree = ne_chunk(tagged_words)\n ner_list = []\n for chunk in ne_tree:\n if hasattr(chunk, 'label'):\n ner_list.append((chunk.label(), ' '.join(c[0] for c in chunk)))\n\n f.write(\"\\n\\n>> Named Entities are: \\n %s \" % ner_list)\n for org in ner_list:\n if org[0] == 'ORGANIZATION':\n organization_count += 1\n if org[0] == 'PERSON':\n person_count += 1\n if org[0] == 'GPE':\n gpe_count += 1\n\n # stemming using porter stemmer\n porter_root_words = []\n porter_stemmer = PorterStemmer()\n for word in words:\n root_word = porter_stemmer.stem(word)\n porter_root_words.append((word, root_word))\n porter_words_count = porter_words_count + len(porter_root_words)\n f.write(\"\\n\\n>> Stemming using Porter Stemmer: \\n %s\" % porter_root_words)\n\n # stemming using snowball stemmer\n snowball_root_words = []\n snow_stemmer = SnowballStemmer(language='english')\n for word in words:\n root_word = snow_stemmer.stem(word)\n snowball_root_words.append((word, root_word))\n snowball_words_count = snowball_words_count + len(snowball_root_words)\n f.write(\"\\n\\n>> Stemming using Snowball Stemmer: \\n %s\" % snowball_root_words)\n\n # lemmatization\n wordnet_lemmatizer = WordNetLemmatizer()\n lemmatized_words = []\n for word in words:\n lemmatized_word = wordnet_lemmatizer.lemmatize(word)\n lemmatized_words.append((word, lemmatized_word))\n lemmatize_words_count = lemmatize_words_count + len(lemmatized_words)\n f.write(\"\\n\\n>> Lemmatization: \\n %s\\n\\n\" % lemmatized_words)\n\n sentence_count += 1\n\n f.write(\"\\n\\n\\n\\n \\t\\t\\t\\t ***************************** File Report ********************************* \\n\\n\"\n \">> Sentence Count: %s\\n\\n>> Tokens with stopwords: %s\\n>> Tokens without stopwords: %s\\n\\n\"\n \">> Bigram Count: %s\\n>> Trigram Count: %s\\n\\n\"\n \">> Organization Count: %s\\n>> Person Count: %s\\n>> GPE Count: %s\\n\\n\"\n \">> Porter Stemmer Words: %s\\n>> Snowball Stemmer Words: %s\\n>> Lemmatize Words: %s\"\n % (sentence_count, token_count_with_stopwords, token_count_without_stopwords,\n bigram_count, trigram_count, organization_count, person_count, gpe_count,\n porter_words_count, snowball_words_count, lemmatize_words_count))\n\n\ndef spacy_processing(input_directory, output_directory):\n \"\"\"\n NLP Processing on non xml files using SpaCy library\n \"\"\"\n nlp = spacy.load('en_core_web_sm')\n\n for filename in os.listdir(input_directory):\n with open(input_directory + '/' + filename, \"r\") as i:\n data = i.read()\n\n filename, ext = os.path.splitext(os.path.basename(filename))\n with open(output_directory + '/' + filename + \"_using_Spacy.txt\", \"w\") as f:\n data = data.replace(\"\\n\", ' ')\n data = data.replace(\"e.g.\", 'e.g.-')\n data = data.replace(\"i.e.\", 'i.e.-')\n\n doc = nlp(data)\n sentences = list(doc.sents)\n sentence_count, token_with_stopwords, token_without_stopwords, bigram_count, trigram_count,\\\n noun_phrase_count, organization_count, person_count, gpe_count = 0, 0, 0, 0, 0, 0, 0, 0, 0\n\n f.write(\"\\t\\t\\t\\t *** Text Processing using Spacy *** \\n\")\n for sentence in sentences:\n f.write(f'\\n\\n============================ Sentence {sentence_count + 1} =============================')\n 
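Aside: a minimal, self-contained sketch of the RegexpParser noun-phrase chunking step from the NLTK record above. The `<DT>?<JJ>*(<NN>|<NNS>|<NNP>)+` grammar and the sample sentence are illustrative assumptions, and the `punkt` and `averaged_perceptron_tagger` NLTK data packages are assumed to be downloaded.

```python
# Hedged sketch, not the dataset author's exact script.
import nltk

sentence = "The quick brown fox jumps over the lazy dog"
tagged = nltk.pos_tag(nltk.word_tokenize(sentence))  # needs punkt + tagger data

# Chunk grammar: optional determiner, any adjectives, one or more nouns.
chunker = nltk.RegexpParser('NP: {<DT>?<JJ>*(<NN>|<NNS>|<NNP>)+}')
tree = chunker.parse(tagged)

noun_phrases = [' '.join(leaf[0] for leaf in subtree.leaves())
                for subtree in tree.subtrees() if subtree.label() == 'NP']
print(noun_phrases)  # e.g. ['The quick brown fox', 'the lazy dog']
```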
f.write(\"\\n\\n%s \\n\" % sentence)\n sentence_count += 1\n\n tokens, pos_tags, dep_tags = [], [], []\n for token in sentence:\n token_with_stopwords = token_with_stopwords + 1\n if not token.is_stop:\n token_without_stopwords = token_without_stopwords + 1\n tokens.append(token)\n pos_tags.append((token.text, token.pos_))\n dep_tags.append((token.text, token.dep_))\n\n f.write(\"\\n\\n>> Tokens are: \\n%s \\n\\n>> PoS Tags are: \\n%s \\n\\n>> Dependency Tags are: \\n%s\"\n % (tokens, pos_tags, dep_tags))\n\n # bigrams generation\n bigrams = []\n for word in range(len(tokens)-1):\n firstWord = tokens[word]\n secondWord = tokens[word + 1]\n element = [firstWord, secondWord]\n bigrams.append(element)\n\n bigram_count += len(bigrams)\n f.write(\"\\n\\n>> Bigrams: \\n%s\" % bigrams)\n\n # trigrams generation\n trigrams = []\n for word in range(len(tokens)-2):\n firstWord = tokens[word]\n secondWord = tokens[word + 1]\n thirdWord = tokens[word + 2]\n element = [firstWord, secondWord, thirdWord]\n trigrams.append(element)\n\n trigram_count += len(trigrams)\n f.write(\"\\n\\n>> Trigrams: \\n%s\" % trigrams)\n\n # noun-phrase chunking\n noun_phrase_count += len(list(sentence.noun_chunks))\n f.write(\"\\n\\n>> Noun Phrases are: \\n%s\" % list(sentence.noun_chunks))\n\n # named entity recognition\n f.write(\"\\n\\n>> Named Entities are: \\n%s\\n\" % [(ent.text, ent.label_) for ent in sentence.ents])\n\n for ent in sentence.ents:\n if ent.label_ == 'ORG':\n organization_count += 1\n if ent.label_ == 'PERSON':\n person_count += 1\n if ent.label_ == 'GPE':\n gpe_count += 1\n\n f.write(\"\\n\\n\\n\\n \\t\\t\\t\\t ***************************** File Report ********************************* \\n\\n\"\n \">> Sentence Count: %s\\n\\n>> Token with stopwords: %s\\n>> Token without stopwords: %s\\n\\n\"\n \">> Total Bigrams: %s\\n>> Total Trigrams: %s\\n\\n>> Total Noun Phrase: %s\\n\\n\"\n \">> Organization Count: %s\\n>> Person Count: %s\\n>> GPE Count: %s\"\n % (sentence_count, token_with_stopwords, token_without_stopwords, bigram_count, trigram_count,\n noun_phrase_count, organization_count, person_count, gpe_count))\n\n\ndef stanza_processing(input_directory, output_directory):\n \"\"\"\n NLP Processing on non xml files using Stanza library\n \"\"\"\n nlp = stanza.Pipeline(lang='en', processors='tokenize, mwt, pos, lemma, ner, depparse')\n for filename in os.listdir(input_directory):\n with open(input_directory + '/' + filename, \"r\") as i:\n data = i.read()\n\n filename, ext = os.path.splitext(os.path.basename(filename))\n with open(output_directory + '/' + filename + \"_using_Stanza.txt\", \"w\") as f:\n data = data.replace(\"\\n\", ' ')\n data = data.replace(\"e.g.\", 'e.g.-')\n data = data.replace(\"i.e.\", 'i.e.-')\n\n sentence_count = 0\n token_count_with_stopwords, lemma_count, organization_count, person_count, gpe_count = 0, 0, 0, 0, 0\n doc = nlp(data)\n f.write(\"\\t\\t\\t\\t *** Text Processing using Stanza *** \\n\")\n for i, sentence in enumerate(doc.sentences):\n f.write(f'\\n\\n========================== Sentence {i + 1} ===========================')\n f.write(\"\\n\\n %s \\n\" % sentence.text)\n f.write(\"\\nTokens are: \")\n tokens = []\n for token in sentence.tokens:\n tokens.append(token.text)\n token_count_with_stopwords += len(tokens)\n f.write(\"\\n>> %s \" % tokens)\n\n upos, xpos, lemma, dep_parse = [], [], [], []\n for word in sentence.words:\n upos.append((word.text, word.upos))\n xpos.append((word.text, word.xpos))\n lemma.append((word.text, word.lemma))\n\n head = 
sentence.words[word.head - 1].text if word.head > 0 else 'root'\n dep_parse.append(((word.text, head), word.deprel))\n\n lemma_count += len(lemma)\n f.write(\"\\n\\n UPOS tags are: \\n>> %s \\n\\n XPOS tags are: \\n>> %s \\n\\n Lemmas are: \\n>> %s \"\n \"\\n\\n Dependency tags are: \\n>> %s\" % (upos, xpos, lemma, dep_parse))\n\n ner = []\n for ent in sentence.ents:\n if ent.type == 'ORG':\n organization_count += 1\n if ent.type == 'GPE':\n gpe_count += 1\n if ent.type == 'PERSON':\n person_count += 1\n ner.append((ent.text, ent.type))\n f.write(\"\\n\\n Named Entities are: \\n>> %s\" % ner)\n sentence_count += 1\n\n f.write(\"\\n\\n\\n\\n \\t\\t\\t\\t ***************************** File Report ********************************* \\n\\n\"\n \">> Sentence Count: %s\\n\\n>> Total Tokens: %s\\n\\n>> Total Lemmas: %s\\n\\n\"\n \">> Organization Count: %s\\n>> Person Count: %s\\n>> GPE Count: %s\"\n % (i + 1, token_count_with_stopwords, lemma_count, organization_count, person_count, gpe_count))\n\n\nif __name__ == '__main__':\n print(\"\\n\\n\\n *** NLP Processing on non xml files *** \\n\\n\")\n print(\"Please select the processing method:\\n\"\n \"1. NLP processing using NLTK\\n\"\n \"2. NLP processing using Spacy\\n\"\n \"3. NLP processing using Stanza\\n\")\n user_choice = input(\"\\nPlease Enter your choice (1, 2 or 3):\")\n\n input_file_directory = input(\"\\n> Enter the input directory path:\")\n output_file_directory = input(\"\\n> Enter the output directory path:\")\n\n if user_choice == '1':\n print(\"\\n\\n NLTK in Progress...\")\n nltk_processing(input_file_directory, output_file_directory)\n elif user_choice == '2':\n print(\"\\n\\n Spacy in Progress...\")\n spacy_processing(input_file_directory, output_file_directory)\n elif user_choice == '3':\n print(\"\\n\\n Stanza in Progress...\")\n stanza_processing(input_file_directory, output_file_directory)\n else:\n print(\"Invalid Choice\")\n","sub_path":"text_tokenizer.py","file_name":"text_tokenizer.py","file_ext":"py","file_size_in_byte":15102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"5049497","text":"# -*- coding: utf-8 -*-\nfrom magic_ui.module.base_config import BaseConfig\n\n__author__ = 'litang.wang'\n\n\nclass PCConfig(BaseConfig):\n def __init__(self):\n BaseConfig.__init__(self)\n self.chromedriver_path = self.root_path + 'resource/chromedriver.exe'\n self.iedriver_path = self.root_path + 'resource/IEDriverServer.exe'\n self.first_url = 'http://www.kuaishou.com/hot'\n self.thread_count = 1\n","sub_path":"magic_ui/module/pc_config.py","file_name":"pc_config.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"525980044","text":"from requests_oauthlib import OAuth2Session\nimport os\nimport json\nimport urllib.parse\n\nCLIENT_ID = os.environ[\"CLIENT_ID\"]\nCLIENT_SECRET = os.environ[\"CLIENT_SECRET\"]\nREDIRECT_URI = os.environ[\"REDIRECT_URI\"]\nBASE_URL = os.environ[\"BASE_URL\"]\n\nTOKEN_URL = 'https://www.linkedin.com/uas/oauth2/accessToken' \n\ndef oauth(event, context):\n queryStringParameters = event.get(\"queryStringParameters\", {})\n\n if not queryStringParameters or not queryStringParameters.get(\"state\") or not queryStringParameters.get(\"code\"):\n return {\n \"statusCode\": 200,\n \"headers\": {\n \"Content-Type\": \"text/html\"\n },\n \"body\": f'Missing URI Parameters. 
Navigate to the Login Page to restart.'\n }\n\n state = queryStringParameters.get(\"state\")\n encodedParameters = urllib.parse.urlencode(queryStringParameters)\n requestUrl = f\"{BASE_URL}?{encodedParameters}\"\n linkedin = OAuth2Session(CLIENT_ID, redirect_uri=REDIRECT_URI, state=state)\n\n try:\n token = linkedin.fetch_token(\n TOKEN_URL,\n client_secret=CLIENT_SECRET,\n include_client_id=True,\n authorization_response=requestUrl,\n verify=False\n )\n except:\n return {\n \"statusCode\": 200,\n \"headers\": {\n \"Content-Type\": \"text/html\"\n },\n \"body\": f'The application failed to fetch an OAuth token. Navigate to Login Page to restart.'\n }\n\n response = {\n \"statusCode\": 200,\n \"body\": json.dumps(token)\n }\n return response\n","sub_path":"LinkedIn-OAuth/linkedin/oauth.py","file_name":"oauth.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"273714376","text":"x1=int(input(\"Enter row value of current position of row between 1-8:: \"))\ny1=int(input(\"Enter column value of current position of column between 1-8:: \"))\nx2=int(input(\"Enter destination row value 1-8:: \"))\ny2=int(input(\"Enter destination column value 1-8:: \"))\nif((x1>0 and x1<9) and (y1>0 and y1<9) and(x2>0 and x2<9) and(y2>0 and y2<9)):\n\tif((x2==x1 or x2==x1+1 or x2==x1-1) and (y2==y1 or y2==y1+1 or y2==y1-1)):\n\t\tprint(\"YES! Carry On, The King can move...\")\n\telse:\n\t\tprint(\"King can't move\")\nelse:\n\tprint(\"NO! You CAN'T play..You should enter 1-8 nos Only....\")","sub_path":"Conditions_23.py","file_name":"Conditions_23.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"649308479","text":"import pyspark\nfrom pyspark.sql import SparkSession\nimport pymongo\nimport json\nimport copy\nfrom pyspark.sql.functions import lower, col\nfrom pyspark import SparkContext\nimport pyspark.sql\nfrom pyspark.sql.functions import explode\nimport sys\n\n\nconf = pyspark.SparkConf()\n#conf.setMaster(\"spark://104.198.99.155.80:7077\")\nconf.set(\"spark.mongodb.input.uri\", \"mongodb://104.197.54.204/tweets.tweet\")\nconf.set(\"spark.mongodb.output.uri\", \"mongodb://104.197.54.204/tweets.words\")\nconf.set(\"spark.jars.packages\", \"org.mongodb.spark:mongo-spark-connector_2.11:2.3.2\")\nsc = pyspark.SparkContext(conf=conf)\nmy_spark = SparkSession(sc)\n\n'''\nmy_spark = SparkSession \\\n .builder \\\n .appName(\"myApp\") \\\n .master(\"spark://35.233.240.80:8088\") \\\n .config(\"spark.mongodb.input.uri\", \"mongodb://104.197.54.204/tweets.tweet\") \\\n .config(\"spark.mongodb.output.uri\", \"mongodb://104.197.54.204/tweets.words\") \\\n .config(\"spark.jars.packages\", \"org.mongodb.spark:mongo-spark-connector_2.11:2.3.2\") \\\n .getOrCreate()\n'''\n\nif __name__ == \"__main__\":\n print(\"Hello World\")\n\n #spark = SparkSession.builder.appName(\"test\").getOrCreate()\n\n df = my_spark.read.format(\"com.mongodb.spark.sql.DefaultSource\").load()\n # df.show()\n #df.collect()\n # rx = \".*\"+keyword+\".*\"\n #text = df.text\n\n #lower case all tweets\n unionDF = df.select(df.text, df.place)\n unionDF = unionDF.withColumn('text', lower(col('text')));\n # explode_DF = unionDF.withColumn('full_name', explode('full_name'))\n #get keyword\n #trump: 1938\n keyword = sys.argv[1].lower()\n tweets_with_words = unionDF[unionDF['text'].contains(keyword)]\n df2 = tweets_with_words.select('text', \"place.*\")\n 
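As a standalone illustration of the lowercase-and-filter step just above, a minimal local-mode sketch; the sample rows and the keyword are made-up assumptions, not values from the record, and no MongoDB connector is needed.

```python
# Hedged sketch: local SparkSession with toy data.
from pyspark.sql import SparkSession
from pyspark.sql.functions import lower, col

spark = SparkSession.builder.master("local[*]").appName("keyword-filter").getOrCreate()
df = spark.createDataFrame([("Trump rally tonight",), ("Weather update",)], ["text"])

keyword = "trump"
hits = df.withColumn("text", lower(col("text"))).filter(col("text").contains(keyword))
hits.show()  # keeps only the first row
spark.stop()
```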
#df2.printSchema()\n # tweets_with_words = unionDF.filter(unionDF.text == keyword)\n\n #count = tweets_with_words.count()\n # Tokens = unionDF.select(\"trump\").collect();\n # tweets_with_words = unionDF.filter(unionDF.text[keyword])\n\n #new_rdd = rdd.filter(lambda x: x in Tokens)\n '''\n full_name = \"county, State\"\n '''\n\n #print(count)\n #print(unionDF.head(2))\n #print(tweets_with_words.head(2))\n #tweets_with_words.select(\"text\").show(10)\n # print(tweets_with_words.head(10))\n\n states = {\n 'AK': 0,\n 'AL': 0,\n 'AR': 0,\n 'AZ': 0,\n 'CA': 0,\n 'CO': 0,\n 'CT': 0,\n 'DE': 0,\n 'FL': 0,\n 'GA': 0,\n 'HI': 0,\n 'IA': 0,\n 'ID': 0,\n 'IL': 0,\n 'IN': 0,\n 'KS': 0,\n 'KY': 0,\n 'LA': 0,\n 'MA': 0,\n 'MD': 0,\n 'ME': 0,\n 'MI': 0,\n 'MN': 0,\n 'MO': 0,\n 'MS': 0,\n 'MT': 0,\n 'NC': 0,\n 'ND': 0,\n 'NE': 0,\n 'NH': 0,\n 'NJ': 0,\n 'NM': 0,\n 'NV': 0,\n 'NY': 0,\n 'OH': 0,\n 'OK': 0,\n 'OR': 0,\n 'PA': 0,\n 'RI': 0,\n 'SC': 0,\n 'SD': 0,\n 'TN': 0,\n 'TX': 0,\n 'UT': 0,\n 'VA': 0,\n 'VT': 0,\n 'WA': 0,\n 'WI': 0,\n 'WV': 0,\n 'WY': 0\n }\n\n us_state_abbrev = {\n 'Alabama': 'AL',\n 'Alaska': 'AK',\n 'Arizona': 'AZ',\n 'Arkansas': 'AR',\n 'California': 'CA',\n 'Colorado': 'CO',\n 'Connecticut': 'CT',\n 'Delaware': 'DE',\n 'Florida': 'FL',\n 'Georgia': 'GA',\n 'Hawaii': 'HI',\n 'Idaho': 'ID',\n 'Illinois': 'IL',\n 'Indiana': 'IN',\n 'Iowa': 'IA',\n 'Kansas': 'KS',\n 'Kentucky': 'KY',\n 'Louisiana': 'LA',\n 'Maine': 'ME',\n 'Maryland': 'MD',\n 'Massachusetts': 'MA',\n 'Michigan': 'MI', #['Michigan', ' USA']\n 'Minnesota': 'MN',\n 'Mississippi': 'MS',\n 'Missouri': 'MO',\n 'Montana': 'MT',\n 'Nebraska': 'NE',\n 'Nevada': 'NV',\n 'New Hampshire': 'NH',\n 'New Jersey': 'NJ',\n 'New Mexico': 'NM',\n 'New York': 'NY',\n 'North Carolina': 'NC',\n 'North Dakota': 'ND',\n 'Ohio': 'OH',\n 'Oklahoma': 'OK',\n 'Oregon': 'OR',\n 'Pennsylvania': 'PA',\n 'Rhode Island': 'RI',\n 'South Carolina': 'SC',\n 'South Dakota': 'SD',\n 'Tennessee': 'TN',\n 'Texas': 'TX',\n 'Utah': 'UT',\n 'Vermont': 'VT',\n 'Virginia': 'VA',\n 'Washington': 'WA',\n 'West Virginia': 'WV',\n 'Wisconsin': 'WI',\n 'Wyoming': 'WY',\n}\n\n places = df2.select(\"full_name\").collect()\n #print(len(places))\n l = []\n for i in range(len(places)):\n l.append(places[i].__getitem__(\"full_name\"))\n\n\n state = us_state_abbrev.keys()\n abbr = states.keys()\n\n for i in l:\n if(i == None):\n continue\n l2 = i.split(',')\n if len(l2) == 1 or len(l2) > 2:\n continue\n l2[0] = l2[0].replace(\" \", \"\")\n l2[1] = l2[1].replace(\" \", \"\")\n if(l2[1] == 'USA'):\n if(l2[0] in state):\n a = us_state_abbrev[l2[0]]\n states[a] += 1\n else:\n continue\n else:\n if(l2[1] in abbr):\n states[l2[1]] += 1\n else:\n continue\n\n states['word'] = keyword\n client = pymongo.MongoClient(\"mongodb://104.197.54.204\",27017)\n db = client[\"tweets\"]\n coll = db['words']\n coll.update_one({'word': keyword}, {\"$set\": states}, upsert=True)\n #print(states)\n","sub_path":"WEB/data_processing.py","file_name":"data_processing.py","file_ext":"py","file_size_in_byte":5239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"632052095","text":"# -*- coding: utf-8 -*-\r\nimport DrawTablet as DT\r\nfrom datetime import datetime\r\n\r\n\r\ndef main():\r\n log_file_path = 'log/log_'+datetime.now().strftime(\"%Y-%m-%d_%H%M%S\").__str__()+'.txt'\r\n dt = DT.DrawTablet()\r\n try:\r\n dt.run(log_file_path)\r\n finally:\r\n dt.log_dump()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n while True:\r\n 
main()\r\n","sub_path":"Recoder.py","file_name":"Recoder.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"116745084","text":"import pickle\nfrom collections import namedtuple\nfrom pathlib import Path\n\nimport jax.numpy as jnp\nimport numpy as np\nimport tensorboard.summary\nfrom jax.tree_util import tree_map\n\nCheckpoint = namedtuple('Checkpoint', 'step loss path')\n\n\nclass CheckpointStore:\n r\"\"\"Stores training checkpoints in the working directory.\n\n Args:\n workdir (str): path where checkpoints are stored.\n size (int): maximum number of checkpoints stored at any time.\n interval (int): number of steps between two checkpoints.\n \"\"\"\n\n PATTERN = 'chkpt-{}.pt'\n\n def __init__(self, workdir, *, size=3, interval=1000):\n self.workdir = Path(workdir)\n for p in self.workdir.glob(self.PATTERN.format('*')):\n p.unlink()\n self.size = size\n self.interval = interval\n self.chkpts = []\n self.buffer = None\n\n def update(self, step, state, loss=jnp.inf):\n self.buffer = (step, state, loss)\n if not self.chkpts or (step >= self.interval + self.chkpts[-1].step):\n self.dump()\n while len(self.chkpts) > self.size:\n self.chkpts.pop(0).path.unlink()\n\n def dump(self):\n step, state, loss = self.buffer\n path = self.workdir / self.PATTERN.format(step)\n with path.open('wb') as f:\n pickle.dump((step, state), f)\n self.chkpts.append(Checkpoint(step, loss, path))\n\n def close(self):\n if self.buffer and not any(tree_map(lambda x: x.is_deleted(), self.buffer[1])):\n self.dump()\n # If the training crashes KFAC might have already freed the buffers and the\n # state can no longer be dumped. Preventing this by keeping a copy significantly\n # impacts the performance and is therefore omitted.\n\n @property\n def last(self):\n chkpt = self.chkpts[-1]\n with chkpt.path.open('rb') as f:\n return pickle.load(f)\n\n\nclass H5LogTable:\n r\"\"\"An interface for writing results to HDF5 files.\"\"\"\n\n def __init__(self, group):\n self._group = group\n\n def __getitem__(self, label):\n return self._group[label] if label in self._group else []\n\n def resize(self, size):\n for ds in self._group.values():\n ds.resize(size, axis=0)\n\n # mimicking Pytables API\n @property\n def row(self):\n class Appender:\n def __setitem__(_, label, row): # noqa: B902, N805\n if isinstance(row, np.ndarray):\n shape = row.shape\n elif isinstance(row, jnp.ndarray):\n shape = row.shape\n elif isinstance(row, (float, int)):\n shape = ()\n if label not in self._group:\n if isinstance(row, np.ndarray):\n dtype = row.dtype\n elif isinstance(row, float):\n dtype = float\n else:\n dtype = None\n self._group.create_dataset(\n label, (0, *shape), maxshape=(None, *shape), dtype=dtype\n )\n ds = self._group[label]\n ds.resize(ds.shape[0] + 1, axis=0)\n ds[-1, ...] 
= row\n\n return Appender()\n\n\nclass TensorboardMetricLogger:\n r\"\"\"An interface for writing metrics to Tensorboard.\"\"\"\n\n def __init__(self, workdir, n_mol):\n self.global_writer = tensorboard.summary.Writer(workdir)\n self.per_mol_writers = [\n tensorboard.summary.Writer(f'{workdir}/{i}') for i in range(n_mol)\n ]\n\n def update(self, step, stats, prefix=None):\n r\"\"\"Update tensorboard writer with a dictionary of scalar entries.\n\n Args:\n step (int): the step at which to add the new entries.\n stats (dict): a dictionary containing the scalar entries to add.\n \"\"\"\n per_mol = stats.pop('per_mol')\n for k, v in per_mol.items():\n for i, writer in enumerate(self.per_mol_writers):\n if not (jnp.isnan(v[i]) or jnp.isinf(v[i])):\n writer.add_scalar(f'{prefix}/{k}' if prefix else k, v[i], step)\n for k, v in stats.items():\n self.global_writer.add_scalar(f'{prefix}/{k}' if prefix else k, v, step)\n\n def close(self):\n self.global_writer.close()\n for writer in self.per_mol_writers:\n writer.close()\n","sub_path":"src/deepqmc/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":4392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"629854227","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport scipy as sp\nimport scipy.misc\nimport sys\nimport cv2\nimport time\n\nprint('\\n')\n\n# Constants\n\nimg_num = 61191\ntest_num = 40668\nfile_num = 20521\nimg_size = 64\n\n\ndef down_sample(image):\n\timage_ds = np.zeros([64,64,4], dtype=np.uint8)\n\n\tx = [0,0,0,0]\n\n\tfor i in range(64):\n\t\tfor j in range(64):\n\t\t\tx[0] = sum(sum(image[4*i:4*(i+1), 4*j:4*(j+1), 0])) // 256 // 4**2\n\t\t\tx[1] = sum(sum(image[4*i:4*(i+1), 4*j:4*(j+1), 1])) // 256 // 4**2\n\t\t\tx[2] = sum(sum(image[4*i:4*(i+1), 4*j:4*(j+1), 2])) // 256 // 4**2\n\t\t\tx[3] = sum(sum(image[4*i:4*(i+1), 4*j:4*(j+1), 3])) // 256 // 4**2\n\n\t\t\timage_ds[i,j,0] = x[0]\n\t\t\timage_ds[i,j,1] = x[1]\n\t\t\timage_ds[i,j,2] = x[2]\n\t\t\timage_ds[i,j,3] = x[3]\n\n\treturn image_ds\n\n\n# Make Image Array\n\nimage_array = np.zeros([img_num,img_size,img_size,4], dtype=np.uint8)\n\n\nfor i in range(file_num+1):\n\n\tif i%100 == 0:\n\t\tprint(i)\n\n\tfp = '../test-tif-v2/file_{}.tif'.format(i)\n\timage = cv2.imread(fp, -1)\n\t\n\timage_ds = down_sample(image)\n\n\timage_array[i,:,:,:] = image_ds\n\n\nfor i in range(test_num+1):\n\n\tif i%100 == 0:\n\t\tprint(i)\n\n\tfp = '../test-tif-v2/test_{}.tif'.format(i)\n\timage = cv2.imread(fp, -1)\n\t\n\timage_ds = down_sample(image)\n\n\timage_array[i+file_num,:,:,:] = image_ds\n\n\nwith open('./numpy_data/test_images_64.npy', 'wb') as f:\n\tnp.save(f, image_array)","sub_path":"make_array.py","file_name":"make_array.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"401504202","text":"from pyomo.environ import ConcreteModel, Set, Objective, Var, Param, Constraint, NonNegativeReals, maximize, summation\n\n# create the model\ndef create_model(name, template, nodes, links, types, ts_idx, params, blocks):\n\n m = ConcreteModel(name=name)\n\n # SETS\n\n # basic sets\n m.Nodes = Set(initialize=nodes) # nodes\n m.Links = Set(initialize=links) # links\n m.TS = Set(initialize=ts_idx, ordered=True) # time steps - initialize later?\n\n # all nodes directly upstream from a node\n def NodesIn_init(m, node):\n return [i for (i,j) in m.Links if j == node]\n m.NodesIn = Set(m.Nodes, initialize=NodesIn_init)\n\n 
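A plain-Python aside on the connectivity bookkeeping here: the `NodesIn` set just defined (and the `NodesOut` set that follows) simply invert a list of directed links. The toy three-node network below is an assumption for illustration only.

```python
# Same derivation as NodesIn/NodesOut, without Pyomo; data is made up.
links = [('res1', 'junc1'), ('junc1', 'dem1')]
nodes = ['res1', 'junc1', 'dem1']

nodes_in = {n: [i for (i, j) in links if j == n] for n in nodes}   # upstream neighbours
nodes_out = {n: [k for (j, k) in links if j == n] for n in nodes}  # downstream neighbours

assert nodes_in['junc1'] == ['res1']
assert nodes_out['junc1'] == ['dem1']
```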
# all nodes directly downstream from a node\n def NodesOut_init(m, node):\n return [k for (j, k) in m.Links if j==node]\n m.NodesOut = Set(m.Nodes, initialize=NodesOut_init) \n\n # sets (nodes or links) for each template type\n for k, v in types['node'].items():\n exec('m.{} = Set(within=m.Nodes, initialize={})'.format(k, v))\n for k, v in types['link'].items():\n exec('m.{} = Set(within=m.Links, initialize={})'.format(k, v))\n #for k, v in types['network'].items():\n #exec('m.{} = Set(within=m.Network, initialize={})'.format(k, v))\n\n # sets for non-storage nodes\n m.NonReservoir = m.Nodes - m.Reservoir\n m.DemandNodes = m.NonReservoir - m.Junction\n m.NonJunction = m.Nodes - m.Junction\n\n # set - all blocks in each demand or reservoir node, and identify node-blocks\n def NodeBlockLookup_init(m, i):\n if i in blocks['node']:\n return blocks['node'][i]\n else:\n return [0]\n m.NodeBlockLookup = Set(m.NonJunction, initialize=NodeBlockLookup_init)\n\n # set - all blocks in each link\n def LinkBlockLookup_init(m, i, j):\n if (i,j) in blocks['link']:\n return blocks['link'][(i,j)]\n else:\n return [0]\n m.LinkBlockLookup = Set(m.Links, initialize=LinkBlockLookup_init)\n\n # create node-block and link-block sets\n\n def NodeBlock(m):\n blocks = []\n for i in m.Nodes:\n for b in NodeBlockLookup_init(m, i):\n blocks.append((i, b))\n return blocks\n \n def LinkBlock(m):\n blocks = []\n for i, j in m.Links:\n for b in LinkBlockLookup_init(m, i, j):\n blocks.append((i, j, b))\n return blocks\n \n m.NodeBlocks = Set(dimen=2, initialize=NodeBlock)\n m.LinkBlocks = Set(dimen=3, initialize=LinkBlock)\n\n # VARIABLES (all variables should be prepended with resource type)\n\n m.nodeDelivery = Var(m.Nodes * m.TS, domain=NonNegativeReals) # delivery to demand nodes\n m.nodeDeliveryDB = Var(m.NodeBlocks * m.TS, domain=NonNegativeReals) # delivery to demand nodes\n m.nodeDeliverySurplus = Var(m.Nodes * m.TS, domain=NonNegativeReals) # delivery to demand nodes\n m.nodeStorage = Var(m.Reservoir * m.TS, domain=NonNegativeReals) # storage\n #m.S_DB = Var(m.NodeBlocks * m.TS, domain=NonNegativeReals) # storage by demand block\n\n m.nodeGain = Var(m.Nodes * m.TS, domain=NonNegativeReals) # gain (local inflow)\n m.nodeLoss = Var(m.Nodes * m.TS, domain=NonNegativeReals) # loss (local outflow)\n m.nodeInflow = Var(m.Nodes * m.TS, domain=NonNegativeReals) # total inflow to a node\n m.nodeOutflow = Var(m.Nodes * m.TS, domain=NonNegativeReals) # total outflow from a node\n\n m.linkFlow = Var(m.Links * m.TS, domain=NonNegativeReals) # flow in links \n m.linkFlowLB = Var(m.LinkBlocks * m.TS, domain=NonNegativeReals) # flow in links\n m.linkFlowSurplus = Var(m.Links * m.TS, domain=NonNegativeReals) # flow in links \n\n # PARAMETERS\n \n for param in params.values():\n if param['is_var'] == 'N':\n initial_values = param['initial_values']\n expression = param['expression']\n exec(expression)\n\n m.nodeLocalGain = Param(m.Nodes, m.TS, default=0) # placeholder\n m.nodeLocalLoss = Param(m.Nodes, m.TS, default=0) # placeholder\n\n # CONSTRAINTS\n\n # Constraint set: boundary conditions\n def LocalGain_rule(m, j, t):\n return m.nodeGain[j,t] == m.nodeRunoff[j,t] + m.nodeLocalGain[j,t]\n m.LocalGain_constraint = Constraint(m.Nodes, m.TS, rule=LocalGain_rule)\n\n def LocalLoss_rule(m, j, t):\n return m.nodeLoss[j,t] == m.nodeLocalLoss[j,t] + m.nodeDelivery[j,t] * m.nodeConsumptiveLoss[j,t] / 100\n m.LocalLoss_constraint = Constraint(m.Nodes, m.TS, rule=LocalLoss_rule)\n\n def Inflow_rule(m, j, t):\n return m.nodeInflow[j,t] == 
sum(m.linkFlow[i,j,t] for i in m.NodesIn[j])\n m.Inflow_constraint = Constraint(m.Nodes, m.TS, rule=Inflow_rule)\n\n def Outflow_rule(m, j, t): # not to be confused with Outflow resources\n if j in m.OutflowNode:\n return Constraint.Skip # no outflow constraint at outflow nodes\n else:\n return m.nodeOutflow[j,t] == sum(m.linkFlow[j,k,t] for k in m.NodesOut[j])\n m.Outflow_constraint = Constraint(m.Nodes, m.TS, rule=Outflow_rule)\n\n def Delivery_rule(m, j, t): # the same as I, but for a different purpose\n return m.nodeDelivery[j,t] == sum(m.linkFlow[i,j,t] for i in m.NodesIn[j])\n m.Delivery_constraint = Constraint(m.DemandNodes, m.TS, rule=Delivery_rule)\n\n def NodeBlock_rule(m, j, b, t):\n return m.nodeDeliveryDB[j,b,t] <= m.nodeDemand[j,b,t]\n m.NodeBlock_constraint = Constraint(m.NodeBlocks, m.TS, rule=NodeBlock_rule)\n\n #def LinkBlock_rule(m, i, j, b, t):\n # not implemented yet\n #'''Each link block flow is less than or equal to the link block demand.'''\n #return m.Flow_LB[i,j,b,t] <= m.Link_Demand[i,j,b,t]\n #m.LinkBlock_constraint = Constraint(m.LinkBlocks, m.TS, rule=LinkBlock_rule)\n\n # Constraint set: Block mass balances\n \n def NodeBlockMassBalance_rule(m, j, t):\n '''Water delivered to each node equals the sum of demand blocks for the node'''\n return m.nodeDelivery[j,t] == sum(m.nodeDeliveryDB[j,b,t] for b in m.NodeBlockLookup[j]) + m.nodeDeliverySurplus[j,t]\n m.DemandNodeBlockMassBalance = Constraint(m.DemandNodes, m.TS, rule=NodeBlockMassBalance_rule)\n m.ReservoirBlockMassBalance = Constraint(m.Reservoir, m.TS, rule=NodeBlockMassBalance_rule)\n\n def LinkBlockMassBalance_rule(m, i, j, t):\n '''Water delivered via each link equals the sum of demand blocks for the link'''\n return m.linkFlow[i,j,t] == sum(m.linkFlowLB[i,j,b,t] for b in m.LinkBlockLookup[i,j]) + m.linkFlowSurplus[i,j,t]\n m.LinkBlockMassBalance = Constraint(m.Links, m.TS, rule=LinkBlockMassBalance_rule)\n\n # general mass balance\n def MassBalance_rule(m, j, t):\n if j in m.Reservoir:\n if t == m.TS.first():\n return m.nodeStorage[j, t] - m.nodeInitialStorage[j] == \\\n m.nodeGain[j, t] + m.nodeInflow[j, t] - m.nodeLoss[j, t] - m.nodeOutflow[j,t]\n else:\n return m.nodeStorage[j, t] - m.nodeStorage[j, m.TS.prev(t)] == \\\n m.nodeGain[j, t] + m.nodeInflow[j,t] - m.nodeLoss[j, t] - m.nodeOutflow[j,t]\n else:\n return m.nodeGain[j, t] + m.nodeInflow[j,t] == m.nodeLoss[j, t] + m.nodeOutflow[j, t]\n m.MassBalance = Constraint(m.Nodes, m.TS, rule=MassBalance_rule)\n\n # channel capacity\n def ChannelCap_rule(m, i, j, t):\n return m.linkFlow[i,j,t] <= m.linkFlowCapacity[i,j,t]\n m.ConveyanceCapacity = Constraint(m.Conveyance, m.TS, rule=ChannelCap_rule)\n m.DeliveryLinkCapacity = Constraint(m.DeliveryLink, m.TS, rule=ChannelCap_rule)\n\n # storage capacity\n def StorageBounds_rule(m, j, t):\n return (m.nodeInactivePool[j,t], m.nodeStorage[j,t], m.nodeStorageCapacity[j,t])\n m.StorageBounds = Constraint(m.Reservoir, m.TS, rule=StorageBounds_rule)\n \n def StorageDefinition_rule(m, j, t):\n return m.nodeStorage[j,t] == m.nodeDelivery[j,t]\n m.StorageDefinition = Constraint(m.Reservoir, m.TS, rule=StorageDefinition_rule)\n\n # OBJECTIVE FUNCTION\n\n def Objective_fn(m):\n # Link demand / value not yet implemented\n return summation(m.nodeValue, m.nodeDeliveryDB) #+ summation(m.Link_Value, m.Flow_LB)\n m.Ojective = Objective(rule=Objective_fn, sense=maximize)\n\n return m\n 
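For readers unfamiliar with Pyomo, a compact sketch of the build pattern `create_model` uses throughout (sets, rule-based constraints, a maximize objective). The toy capacity bound and the GLPK solver mention are assumptions, not part of the model above.

```python
# Minimal Pyomo model using the same skeleton; numbers are illustrative.
from pyomo.environ import (ConcreteModel, Set, Var, Constraint, Objective,
                           NonNegativeReals, maximize)

m = ConcreteModel(name='toy')
m.Nodes = Set(initialize=['a', 'b'])
m.flow = Var(m.Nodes, domain=NonNegativeReals)

def cap_rule(m, n):
    return m.flow[n] <= 10  # stand-in for the capacity constraints above
m.Cap = Constraint(m.Nodes, rule=cap_rule)

m.Obj = Objective(expr=sum(m.flow[n] for n in m.Nodes), sense=maximize)
# Solve with any LP solver, e.g.:
#   from pyomo.environ import SolverFactory
#   SolverFactory('glpk').solve(m)  # assumes GLPK is installed
```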
\n","sub_path":"model/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"535919982","text":"# coding: utf-8\n\nimport six\n\nfrom huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization\n\n\nclass UpdateRuleAclDto:\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n sensitive_list = []\n\n openapi_types = {\n 'address_type': 'int',\n 'name': 'str',\n 'sequence': 'OrderRuleAclDto',\n 'direction': 'int',\n 'action_type': 'int',\n 'status': 'int',\n 'description': 'str',\n 'long_connect_time_hour': 'int',\n 'long_connect_time_minute': 'int',\n 'long_connect_time_second': 'int',\n 'long_connect_time': 'int',\n 'long_connect_enable': 'int',\n 'source': 'RuleAddressDto',\n 'destination': 'RuleAddressDto',\n 'service': 'RuleServiceDto',\n 'type': 'int'\n }\n\n attribute_map = {\n 'address_type': 'address_type',\n 'name': 'name',\n 'sequence': 'sequence',\n 'direction': 'direction',\n 'action_type': 'action_type',\n 'status': 'status',\n 'description': 'description',\n 'long_connect_time_hour': 'long_connect_time_hour',\n 'long_connect_time_minute': 'long_connect_time_minute',\n 'long_connect_time_second': 'long_connect_time_second',\n 'long_connect_time': 'long_connect_time',\n 'long_connect_enable': 'long_connect_enable',\n 'source': 'source',\n 'destination': 'destination',\n 'service': 'service',\n 'type': 'type'\n }\n\n def __init__(self, address_type=None, name=None, sequence=None, direction=None, action_type=None, status=None, description=None, long_connect_time_hour=None, long_connect_time_minute=None, long_connect_time_second=None, long_connect_time=None, long_connect_enable=None, source=None, destination=None, service=None, type=None):\n \"\"\"UpdateRuleAclDto\n\n The model defined in huaweicloud sdk\n\n :param address_type: 地址类型,0 ipv4,1 ipv6\n :type address_type: int\n :param name: 规则名称\n :type name: str\n :param sequence: \n :type sequence: :class:`huaweicloudsdkcfw.v1.OrderRuleAclDto`\n :param direction: 规则方向\n :type direction: int\n :param action_type: 动作0:permit,1:deny\n :type action_type: int\n :param status: 规则下发状态 0:禁用,1:启用\n :type status: int\n :param description: 描述\n :type description: str\n :param long_connect_time_hour: 长连接时长小时\n :type long_connect_time_hour: int\n :param long_connect_time_minute: 长连接时长分钟\n :type long_connect_time_minute: int\n :param long_connect_time_second: 长连接时长秒\n :type long_connect_time_second: int\n :param long_connect_time: 长连接时长\n :type long_connect_time: int\n :param long_connect_enable: 是否支持长连接,0表示不支持,1表示支持\n :type long_connect_enable: int\n :param source: \n :type source: :class:`huaweicloudsdkcfw.v1.RuleAddressDto`\n :param destination: \n :type destination: :class:`huaweicloudsdkcfw.v1.RuleAddressDto`\n :param service: \n :type service: :class:`huaweicloudsdkcfw.v1.RuleServiceDto`\n :param type: 规则type,0:互联网规则,1:vpc规则,2:nat规则\n :type type: int\n \"\"\"\n \n \n\n self._address_type = None\n self._name = None\n self._sequence = None\n self._direction = None\n self._action_type = None\n self._status = None\n self._description = None\n self._long_connect_time_hour = None\n self._long_connect_time_minute = None\n self._long_connect_time_second = None\n self._long_connect_time = None\n self._long_connect_enable = None\n self._source = None\n 
self._destination = None\n self._service = None\n self._type = None\n self.discriminator = None\n\n if address_type is not None:\n self.address_type = address_type\n if name is not None:\n self.name = name\n if sequence is not None:\n self.sequence = sequence\n if direction is not None:\n self.direction = direction\n if action_type is not None:\n self.action_type = action_type\n if status is not None:\n self.status = status\n if description is not None:\n self.description = description\n if long_connect_time_hour is not None:\n self.long_connect_time_hour = long_connect_time_hour\n if long_connect_time_minute is not None:\n self.long_connect_time_minute = long_connect_time_minute\n if long_connect_time_second is not None:\n self.long_connect_time_second = long_connect_time_second\n if long_connect_time is not None:\n self.long_connect_time = long_connect_time\n if long_connect_enable is not None:\n self.long_connect_enable = long_connect_enable\n if source is not None:\n self.source = source\n if destination is not None:\n self.destination = destination\n if service is not None:\n self.service = service\n if type is not None:\n self.type = type\n\n @property\n def address_type(self):\n \"\"\"Gets the address_type of this UpdateRuleAclDto.\n\n 地址类型,0 ipv4,1 ipv6\n\n :return: The address_type of this UpdateRuleAclDto.\n :rtype: int\n \"\"\"\n return self._address_type\n\n @address_type.setter\n def address_type(self, address_type):\n \"\"\"Sets the address_type of this UpdateRuleAclDto.\n\n 地址类型,0 ipv4,1 ipv6\n\n :param address_type: The address_type of this UpdateRuleAclDto.\n :type address_type: int\n \"\"\"\n self._address_type = address_type\n\n @property\n def name(self):\n \"\"\"Gets the name of this UpdateRuleAclDto.\n\n 规则名称\n\n :return: The name of this UpdateRuleAclDto.\n :rtype: str\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, name):\n \"\"\"Sets the name of this UpdateRuleAclDto.\n\n 规则名称\n\n :param name: The name of this UpdateRuleAclDto.\n :type name: str\n \"\"\"\n self._name = name\n\n @property\n def sequence(self):\n \"\"\"Gets the sequence of this UpdateRuleAclDto.\n\n :return: The sequence of this UpdateRuleAclDto.\n :rtype: :class:`huaweicloudsdkcfw.v1.OrderRuleAclDto`\n \"\"\"\n return self._sequence\n\n @sequence.setter\n def sequence(self, sequence):\n \"\"\"Sets the sequence of this UpdateRuleAclDto.\n\n :param sequence: The sequence of this UpdateRuleAclDto.\n :type sequence: :class:`huaweicloudsdkcfw.v1.OrderRuleAclDto`\n \"\"\"\n self._sequence = sequence\n\n @property\n def direction(self):\n \"\"\"Gets the direction of this UpdateRuleAclDto.\n\n 规则方向\n\n :return: The direction of this UpdateRuleAclDto.\n :rtype: int\n \"\"\"\n return self._direction\n\n @direction.setter\n def direction(self, direction):\n \"\"\"Sets the direction of this UpdateRuleAclDto.\n\n 规则方向\n\n :param direction: The direction of this UpdateRuleAclDto.\n :type direction: int\n \"\"\"\n self._direction = direction\n\n @property\n def action_type(self):\n \"\"\"Gets the action_type of this UpdateRuleAclDto.\n\n 动作0:permit,1:deny\n\n :return: The action_type of this UpdateRuleAclDto.\n :rtype: int\n \"\"\"\n return self._action_type\n\n @action_type.setter\n def action_type(self, action_type):\n \"\"\"Sets the action_type of this UpdateRuleAclDto.\n\n 动作0:permit,1:deny\n\n :param action_type: The action_type of this UpdateRuleAclDto.\n :type action_type: int\n \"\"\"\n self._action_type = action_type\n\n @property\n def status(self):\n \"\"\"Gets the status of this 
UpdateRuleAclDto.\n\n 规则下发状态 0:禁用,1:启用\n\n :return: The status of this UpdateRuleAclDto.\n :rtype: int\n \"\"\"\n return self._status\n\n @status.setter\n def status(self, status):\n \"\"\"Sets the status of this UpdateRuleAclDto.\n\n 规则下发状态 0:禁用,1:启用\n\n :param status: The status of this UpdateRuleAclDto.\n :type status: int\n \"\"\"\n self._status = status\n\n @property\n def description(self):\n \"\"\"Gets the description of this UpdateRuleAclDto.\n\n 描述\n\n :return: The description of this UpdateRuleAclDto.\n :rtype: str\n \"\"\"\n return self._description\n\n @description.setter\n def description(self, description):\n \"\"\"Sets the description of this UpdateRuleAclDto.\n\n 描述\n\n :param description: The description of this UpdateRuleAclDto.\n :type description: str\n \"\"\"\n self._description = description\n\n @property\n def long_connect_time_hour(self):\n \"\"\"Gets the long_connect_time_hour of this UpdateRuleAclDto.\n\n 长连接时长小时\n\n :return: The long_connect_time_hour of this UpdateRuleAclDto.\n :rtype: int\n \"\"\"\n return self._long_connect_time_hour\n\n @long_connect_time_hour.setter\n def long_connect_time_hour(self, long_connect_time_hour):\n \"\"\"Sets the long_connect_time_hour of this UpdateRuleAclDto.\n\n 长连接时长小时\n\n :param long_connect_time_hour: The long_connect_time_hour of this UpdateRuleAclDto.\n :type long_connect_time_hour: int\n \"\"\"\n self._long_connect_time_hour = long_connect_time_hour\n\n @property\n def long_connect_time_minute(self):\n \"\"\"Gets the long_connect_time_minute of this UpdateRuleAclDto.\n\n 长连接时长分钟\n\n :return: The long_connect_time_minute of this UpdateRuleAclDto.\n :rtype: int\n \"\"\"\n return self._long_connect_time_minute\n\n @long_connect_time_minute.setter\n def long_connect_time_minute(self, long_connect_time_minute):\n \"\"\"Sets the long_connect_time_minute of this UpdateRuleAclDto.\n\n 长连接时长分钟\n\n :param long_connect_time_minute: The long_connect_time_minute of this UpdateRuleAclDto.\n :type long_connect_time_minute: int\n \"\"\"\n self._long_connect_time_minute = long_connect_time_minute\n\n @property\n def long_connect_time_second(self):\n \"\"\"Gets the long_connect_time_second of this UpdateRuleAclDto.\n\n 长连接时长秒\n\n :return: The long_connect_time_second of this UpdateRuleAclDto.\n :rtype: int\n \"\"\"\n return self._long_connect_time_second\n\n @long_connect_time_second.setter\n def long_connect_time_second(self, long_connect_time_second):\n \"\"\"Sets the long_connect_time_second of this UpdateRuleAclDto.\n\n 长连接时长秒\n\n :param long_connect_time_second: The long_connect_time_second of this UpdateRuleAclDto.\n :type long_connect_time_second: int\n \"\"\"\n self._long_connect_time_second = long_connect_time_second\n\n @property\n def long_connect_time(self):\n \"\"\"Gets the long_connect_time of this UpdateRuleAclDto.\n\n 长连接时长\n\n :return: The long_connect_time of this UpdateRuleAclDto.\n :rtype: int\n \"\"\"\n return self._long_connect_time\n\n @long_connect_time.setter\n def long_connect_time(self, long_connect_time):\n \"\"\"Sets the long_connect_time of this UpdateRuleAclDto.\n\n 长连接时长\n\n :param long_connect_time: The long_connect_time of this UpdateRuleAclDto.\n :type long_connect_time: int\n \"\"\"\n self._long_connect_time = long_connect_time\n\n @property\n def long_connect_enable(self):\n \"\"\"Gets the long_connect_enable of this UpdateRuleAclDto.\n\n 是否支持长连接,0表示不支持,1表示支持\n\n :return: The long_connect_enable of this UpdateRuleAclDto.\n :rtype: int\n \"\"\"\n return self._long_connect_enable\n\n 
@long_connect_enable.setter\n def long_connect_enable(self, long_connect_enable):\n \"\"\"Sets the long_connect_enable of this UpdateRuleAclDto.\n\n 是否支持长连接,0表示不支持,1表示支持\n\n :param long_connect_enable: The long_connect_enable of this UpdateRuleAclDto.\n :type long_connect_enable: int\n \"\"\"\n self._long_connect_enable = long_connect_enable\n\n @property\n def source(self):\n \"\"\"Gets the source of this UpdateRuleAclDto.\n\n :return: The source of this UpdateRuleAclDto.\n :rtype: :class:`huaweicloudsdkcfw.v1.RuleAddressDto`\n \"\"\"\n return self._source\n\n @source.setter\n def source(self, source):\n \"\"\"Sets the source of this UpdateRuleAclDto.\n\n :param source: The source of this UpdateRuleAclDto.\n :type source: :class:`huaweicloudsdkcfw.v1.RuleAddressDto`\n \"\"\"\n self._source = source\n\n @property\n def destination(self):\n \"\"\"Gets the destination of this UpdateRuleAclDto.\n\n :return: The destination of this UpdateRuleAclDto.\n :rtype: :class:`huaweicloudsdkcfw.v1.RuleAddressDto`\n \"\"\"\n return self._destination\n\n @destination.setter\n def destination(self, destination):\n \"\"\"Sets the destination of this UpdateRuleAclDto.\n\n :param destination: The destination of this UpdateRuleAclDto.\n :type destination: :class:`huaweicloudsdkcfw.v1.RuleAddressDto`\n \"\"\"\n self._destination = destination\n\n @property\n def service(self):\n \"\"\"Gets the service of this UpdateRuleAclDto.\n\n :return: The service of this UpdateRuleAclDto.\n :rtype: :class:`huaweicloudsdkcfw.v1.RuleServiceDto`\n \"\"\"\n return self._service\n\n @service.setter\n def service(self, service):\n \"\"\"Sets the service of this UpdateRuleAclDto.\n\n :param service: The service of this UpdateRuleAclDto.\n :type service: :class:`huaweicloudsdkcfw.v1.RuleServiceDto`\n \"\"\"\n self._service = service\n\n @property\n def type(self):\n \"\"\"Gets the type of this UpdateRuleAclDto.\n\n 规则type,0:互联网规则,1:vpc规则,2:nat规则\n\n :return: The type of this UpdateRuleAclDto.\n :rtype: int\n \"\"\"\n return self._type\n\n @type.setter\n def type(self, type):\n \"\"\"Sets the type of this UpdateRuleAclDto.\n\n 规则type,0:互联网规则,1:vpc规则,2:nat规则\n\n :param type: The type of this UpdateRuleAclDto.\n :type type: int\n \"\"\"\n self._type = type\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n if attr in self.sensitive_list:\n result[attr] = \"****\"\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)\n\n def __repr__(self):\n \"\"\"For `print`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, UpdateRuleAclDto):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == 
other\n","sub_path":"huaweicloud-sdk-cfw/huaweicloudsdkcfw/v1/model/update_rule_acl_dto.py","file_name":"update_rule_acl_dto.py","file_ext":"py","file_size_in_byte":16631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"265659101","text":"from sqlite3 import connect\nfrom os import path\n\n\nclass Scores:\n def __init__(self, score_file='scores.sqlite'):\n self.conn = connect(score_file)\n self.cur = self.conn.cursor()\n\n # Get count of tables with name scores\n self.cur.execute(\n ''' SELECT count(*) FROM sqlite_master WHERE type='table' AND name='scores' ''')\n if self.cur.fetchone()[0] == 0:\n self.cur.execute(\n ''' CREATE TABLE scores (name TEXT, score INTEGER) ''')\n self.cur.execute(\"PRAGMA foreign_keys = ON;\")\n\n def get(self, command):\n self.cur.execute(command)\n return self.cur.fetchall()\n\n def upload_score(self, names, scores):\n if not isinstance(names, list) and not isinstance(scores, list):\n # add names and scores to db\n self.cur.execute('INSERT INTO scores (name, score) VALUES (?, ?)',\n (names, scores))\n elif isinstance(names, list) and isinstance(scores, list):\n for name, score in zip(names, scores):\n self.cur.execute('INSERT INTO scores (name, score) VALUES (?, ?)',\n (name, score))\n else:\n raise TypeError('Incompatibe types list and var.')\n\n # commit changes to db\n self.conn.commit()\n\n @property\n def leaderboard(self):\n leaderboard = []\n self.cur.execute('SELECT DISTINCT name FROM scores')\n # print(self.cur.fetchall())\n for name in self.cur.fetchall():\n self.cur.execute(\n 'SELECT score FROM scores WHERE name=\"{}\"'.format(name[0]))\n leaderboard.append((name, self.cur.fetchall()))\n return(sorted([(name[0], score[0]) for name, score in map(lambda x: (x[0], max(x[1])), leaderboard)], key=lambda x: x[1], reverse=True))\n\n @property\n def print_leaderboard(self):\n print('Leaderboard:')\n i = 0\n for champ in self.leaderboard:\n print('{} : {}'.format(champ[0], champ[1]))\n if i == 5:\n break\n i += 1\n\n\nif __name__ == '__main__':\n instance = Scores()\n\n # instance.upload_score('Gert', 20)\n instance.print_leaderboard\n instance.conn.close()\n\n# # get the names of all tables\n# instance.cur.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")\n# print(instance.cur.fetchall())\n\n# # get all entrys of the scores table\n# self.cur.execute('SELECT * FROM scores')\n# print(self.cur.fetchall())\n","sub_path":"scores.py","file_name":"scores.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"19990487","text":"from django.urls import path\nfrom . 
import views\n\napp_name = \"webdetail\"\n\nurlpatterns = [\n path('', views.greeting, name=\"greeting\"),\n path('qna/', views.qna, name=\"qna\"),\n path('mypage/', views.mypage, name=\"mypage\"),\n path('signup/', views.signup, name=\"signup\"),\n path('/', views.notfound, name=\"notfound\"),\n \n]\n\n\n","sub_path":"webdetail/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"305779796","text":"from django.http import JsonResponse\nfrom django.shortcuts import render\nfrom django.middleware import csrf\n\n\ndef home(request):\n context = {\n \"controller\": {\n \"name\": \"HomeCtlr\",\n \"as\": \"home\"\n },\n \"page\": {\n \"current\": {\n \"name\": \"Home\",\n \"icon\": \"fa-home\"\n },\n \"parents\": [],\n }\n }\n template = \"broadclass/home.html\"\n\n return render(request, template, context)\n\ndef get_csrf_token(request):\n return JsonResponse(csrf.get_token(request), safe=False)\n","sub_path":"broadclass/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"364434634","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('add_list', '0007_auto_20150606_1024'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='ProductImage',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('product_images', models.FileField(upload_to='images')),\n ('SubmitClassifiedAd', models.ForeignKey(related_name='product_images', to='add_list.SubmitClassifiedAd')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.RemoveField(\n model_name='submitclassifiedad',\n name='product_images',\n ),\n migrations.AlterField(\n model_name='submitclassifiedad',\n name='ad_posted_on',\n field=models.DateTimeField(default=datetime.datetime(2015, 6, 6, 11, 4, 44, 939554), verbose_name='posted on', blank=True),\n ),\n ]\n","sub_path":"add_list/migrations/0008_auto_20150606_1104.py","file_name":"0008_auto_20150606_1104.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"158812493","text":"from collections import Counter\r\n\r\nword1 = input()\r\nword2 = input()\r\n\r\nif len(word1) != len(word2):\r\n print(\"N\")\r\n\r\nelse:\r\n c1 = Counter(word1)\r\n c2 = Counter(word2)\r\n star_count = c2[\"*\"]\r\n\r\n is_ok = True\r\n\r\n for t1 in c1.items():\r\n letter = t1[0]\r\n count1 = t1[1]\r\n count2 = c2[letter]\r\n\r\n if count2 > count1:\r\n is_ok = False\r\n break\r\n\r\n elif count2 < count1:\r\n star_count -= (count1 - count2)\r\n if star_count < 0:\r\n is_ok = False\r\n break\r\n\r\n if is_ok:\r\n print(\"A\")\r\n else:\r\n print(\"N\")","sub_path":"2016 Contest/Problem S1.py","file_name":"Problem S1.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"264519210","text":"# -*- coding: utf-8 -*-\n\n\nimport asyncio\nimport click\nimport importlib\nimport logging\nimport pkg_resources\nimport signal\nimport sys\n\nfrom inspect import iscoroutine\n\nfrom gitmesh.server import serve_until\nfrom gitmesh.storage import Storage\n\n\ndef 
find_entry_points(group):\n for entry_point in pkg_resources.iter_entry_points(group):\n module = importlib.import_module('.'.join(\n [entry_point.module_name] + list(entry_point.attrs[:-1])\n ))\n yield entry_point.name, getattr(module, entry_point.attrs[-1])\n\n\n@click.group()\ndef cli():\n pass\n\n\ndef _await(loop, r):\n \"\"\"Await expression for regular functions.\"\"\"\n if iscoroutine(r) or isinstance(r, asyncio.Future):\n r = loop.run_until_complete(r)\n return r\n\n\n@cli.command(name='pre-receive')\ndef pre_receive():\n \"\"\"Git pre-receive hook.\"\"\"\n loop = asyncio.get_event_loop()\n pre_receive_hooks = list(find_entry_points('gitmesh.pre_receive'))\n updates = [\n line.strip().split(' ', 2) for line in sys.stdin\n ]\n updates = {\n update[2]: (update[0], update[1]) for update in updates\n }\n for _, pre_receive_hook in pre_receive_hooks:\n print('Running hook %r.' % _)\n _await(loop, pre_receive_hook(updates=updates))\n\n\n@cli.command(name='update')\n@click.argument('ref')\n@click.argument('old')\n@click.argument('new')\ndef update(ref, old, new):\n \"\"\"Git pre-receive hook.\"\"\"\n loop = asyncio.get_event_loop()\n update_hooks = list(find_entry_points('gitmesh.update'))\n for _, update_hook in update_hooks:\n print('Running hook %r.' % _)\n _await(loop, update_hook(ref=ref, old=old, new=new))\n\n\n@cli.command(name='post-receive')\ndef post_receive():\n \"\"\"Git post-receive hook.\"\"\"\n loop = asyncio.get_event_loop()\n post_receive_hooks = list(find_entry_points('gitmesh.post_receive'))\n updates = [\n line.strip().split(' ', 2) for line in sys.stdin\n ]\n updates = {\n update[2]: (update[0], update[1]) for update in updates\n }\n for _, post_receive_hook in post_receive_hooks:\n print('Running hook %r.' % _)\n _await(loop, post_receive_hook(updates=updates))\n\n\n@cli.command(name='post-update')\n@click.argument('refs', nargs=-1)\ndef post_update(refs):\n \"\"\"Git post-update hook.\"\"\"\n loop = asyncio.get_event_loop()\n post_update_hooks = list(find_entry_points('gitmesh.post_update'))\n for _, post_update_hook in post_update_hooks:\n print('Running hook %r.' % _)\n _await(loop, post_update_hook(refs=list(refs)))\n\n\n@cli.command(name='serve')\n@click.option('--host', default='0.0.0.0')\n@click.option('--port', default=8080)\ndef serve(host, port):\n \"\"\"Run the server until SIGINT/CTRL-C is received.\"\"\"\n\n # Configure logging.\n logging.basicConfig()\n logging.getLogger('aiohttp.access').setLevel(logging.DEBUG)\n\n # Pick the right event loop.\n if sys.platform == 'win32': # pragma: no cover\n asyncio.set_event_loop(asyncio.ProactorEventLoop())\n loop = asyncio.get_event_loop()\n\n # Await a SIGINT/CTRL-C event.\n cancel = asyncio.Future()\n if sys.platform == 'win32': # pragma: no cover\n pass\n else:\n loop.add_signal_handler(signal.SIGINT, cancel.set_result, None)\n\n # Serve \"forever\".\n loop.run_until_complete(serve_until(\n cancel,\n storage=Storage('.'),\n host=host, port=port,\n ))\n\n\ndef main():\n \"\"\"Setuptools \"console_script\" entry point.\"\"\"\n return cli()\n\n\n# Required for `python -m gitmesh`.\nif __name__ == '__main__': # pragma: no cover\n main()\n","sub_path":"gitmesh/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":3565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"66816253","text":"########\n# Copyright (c) 2014 GigaSpaces Technologies Ltd. 
All rights reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# * See the License for the specific language governing permissions and\n# * limitations under the License.\n\nimport os\n\nimport fabric.api as fab\n\n\nfrom cloudify_cli.cli import get_global_verbosity\nfrom cloudify_cli import utils\nfrom cloudify_cli.exceptions import CloudifyCliError\n\n\ndef get_manager_date():\n # output here should be hidden anyway.\n with fab.settings(fab.hide('running', 'stdout')):\n return run_command_on_manager('date +%Y%m%dT%H%M%S').stdout\n\n\ndef get_file_from_manager(remote_source_path, destination_path):\n key_filename = os.path.expanduser(utils.get_management_key())\n with fab.settings(\n fab.hide('running', 'stdout'),\n host_string=utils.build_manager_host_string(),\n key_filename=key_filename,\n port=utils.get_management_port()):\n fab.get(remote_source_path, destination_path)\n\n\ndef put_file_in_manager(source_path,\n remote_source_path,\n use_sudo=True,\n key_filename=None,\n user=None,\n port=''):\n port = port or utils.get_management_port()\n if not key_filename:\n key_filename = os.path.expanduser(utils.get_management_key())\n with fab.settings(\n fab.hide('running', 'stdout'),\n host_string=utils.build_manager_host_string(user=user),\n key_filename=key_filename,\n port=port):\n fab.put(use_sudo=use_sudo,\n local_path=source_path,\n remote_path=remote_source_path)\n\n\ndef run_command_on_manager(command,\n use_sudo=False,\n open_shell=False,\n host_string='',\n force_output=False):\n \"\"\"Runs an SSH command on a Manager.\n\n `open_shell` opens an interactive shell to the server.\n `host_string` can be explicitly provided to save on REST calls.\n `force_output` forces all output as if running in verbose.\n \"\"\"\n host_string = host_string or utils.build_manager_host_string()\n port = utils.get_management_port()\n\n def execute():\n key_filename = os.path.expanduser(utils.get_management_key())\n with fab.settings(\n host_string=host_string,\n key_filename=key_filename,\n port=port,\n warn_only=True):\n if use_sudo:\n output = fab.sudo(command)\n elif open_shell:\n fab.open_shell(command)\n return None\n else:\n output = fab.run(command)\n if output.failed:\n raise CloudifyCliError(\n 'Failed to execute: {0} ({1})'.format(\n output.real_command, output.stderr))\n return output\n\n if get_global_verbosity() or force_output:\n return execute()\n else:\n with fab.hide('running', 'stdout', 'stderr', 'warnings'):\n return execute()\n","sub_path":"cloudify_cli/ssh.py","file_name":"ssh.py","file_ext":"py","file_size_in_byte":3544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"66498707","text":"'''\nCreated on 2 Feb 2011\n\n@author: Simon Bull\n'''\n\ndef main(fileToCheck, minLength=-1, maxLength=-1):\n \"\"\"Determines whether fileToCheck is appropriately formatted as a fasta file.\n \n An appropriately formatted fasta file is returned in correctFormatting.\n \n Fasta files are accepted if they have the format:\n >PID1\n letters\n >PID2\n letters\n \n Where PID1 and PID2 can be anything, and letters are a (possibly multiline) 
sequence of alphabetic letters.\n    The letters can be upper or lower case, and each letter is interpreted as one amino acid.\n    The correctly formatted fasta file is returned with the sequence only going over one line, and all letters\n    in upper case.\n    \n    If a protein (i.e. a fasta information line) appears in the file more than one time, then the final appearance is\n    taken to be the correct one. Prior appearances are discarded.\n    \n    @param fileToCheck: The input file to check for appropriate FASTA formatting.\n    @type fileToCheck: string\n    @param minLength: The minimum sequence length to accept, or -1 for no lower bound.\n    @type minLength: integer\n    @param maxLength: The maximum sequence length to accept, or -1 for no upper bound.\n    @type maxLength: integer\n    \n    \"\"\"\n    \n    lineCount = 1\n    proteinCount = 0\n    protDescription = True\n    firstLine = True\n    \n    proteinsInFile = {}\n    \n    # The first line of a well formatted FASTA file will start with a '>'.\n    # Following this will be a single line of uppercase letters denoting the amino acid sequence.\n    # This alternation of lines beginning with '>' and lines of all uppercase letters will continue until\n    # the file ends with a line of all uppercase letters.\n    checking = fileToCheck.rstrip()\n    checking = checking.lstrip()\n    checking = checking.split('\\n')\n    for line in checking:\n        line = line.rstrip()\n        if firstLine:\n            if line[0] == '>':\n                currentProt = line\n                protDescription = False\n                firstLine = False\n                currentSeq = ''\n            else:\n                # If the line was not the beginning of a protein record, terminate the program.\n                errorMessage = \"Expected line \" + str(lineCount) + \" to start with a >, but instead got: \" + line\n                return 1, errorMessage\n        elif protDescription:\n            # This is true only if a line beginning with a > is expected.\n            if line[0] == '>':\n                if minLength == -1:\n                    if maxLength == -1:\n                        proteinsInFile[currentProt] = currentSeq\n                    elif len(currentSeq) <= maxLength:\n                        proteinsInFile[currentProt] = currentSeq\n                elif len(currentSeq) >= minLength:\n                    if maxLength == -1:\n                        proteinsInFile[currentProt] = currentSeq\n                    elif len(currentSeq) <= maxLength:\n                        proteinsInFile[currentProt] = currentSeq\n                currentProt = line\n                protDescription = False\n            else:\n                # If the line does not begin with a >, and it is expected to, it is possible that\n                # the amino acid sequence is split over multiple lines.\n                if line.isalpha():\n                    # Check if every character on the line is a letter.\n                    # If every character is a letter, write out the line in upper case.\n                    currentSeq += line.upper()\n                else:\n                    # If the line was not formatted as an amino acid sequence, terminate the program.\n                    errorMessage = \"Expected line \" + str(lineCount) + \" to start with a >, but instead got: \" + line\n                    return 1, errorMessage\n        else:\n            # If an amino acid sequence is expected\n            if line.isalpha():\n                # If the line is all alphabetic characters, write the line out and indicate that we are expecting a\n                # protein description line next (i.e. 
one beginning with a >).\n                currentSeq = line.upper()\n                protDescription = True\n            else:\n                # If the line did not contain only letters, terminate the program.\n                errorMessage = \"Expected line \" + str(lineCount) + \" to contain only letters, but instead got: \" + line\n                return 2, errorMessage\n        \n        lineCount += 1\n    \n    # Catch the final protein from the file.\n    if minLength == -1:\n        if maxLength == -1:\n            proteinsInFile[currentProt] = currentSeq\n        elif len(currentSeq) <= maxLength:\n            proteinsInFile[currentProt] = currentSeq\n    elif len(currentSeq) >= minLength:\n        if maxLength == -1:\n            proteinsInFile[currentProt] = currentSeq\n        elif len(currentSeq) <= maxLength:\n            proteinsInFile[currentProt] = currentSeq\n\n    if len(proteinsInFile.keys()) < 2:\n        # There are too few protein sequences entered\n        errorMessage = (\"Not enough unique protein sequences have been entered.\" +\n                        \" This is possibly caused by not enough sequences of the required length being provided.\"\n                        )\n        return 3, errorMessage\n    elif len(proteinsInFile.keys()) > 500:\n        # There are too many protein sequences entered\n        errorMessage = \"Too many protein sequences have been entered. No more than 500 sequences can be entered. In order to cull more than 500 sequences, please download the standalone version of Leaf from http://www.bioinf.manchester.ac.uk/leaf/downloads/#SourceCode.\"\n        return 3, errorMessage\n    elif protDescription:\n        # Return an indication that the FASTA file is correctly formatted\n        outputString = ''\n        for i in proteinsInFile.keys():\n            outputString += i + '\\n' + proteinsInFile[i] + '\\n'\n        return 0, outputString[:-1]\n    else:\n        # The file did not end with a protein sequence\n        errorMessage = \"Reached the end of the file, but no protein sequence found for the final protein.\"\n        return 3, errorMessage","sub_path":"LeafWebApp/Leaf/cullinput/checkfastaformat.py","file_name":"checkfastaformat.py","file_ext":"py","file_size_in_byte":6050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} {"seq_id":"487993293","text":"import time\nimport unittest\nimport smtplib\n\ntry:\n    import mox\nexcept ImportError:\n    raise ImportError(\"Python MOX must be installed to run this unittest. 
\"\n                      \"http://code.google.com/p/pymox/\")\n\nfrom WMCore.Configuration import ConfigSection\nfrom WMCore.Alerts.Alert import Alert\nimport WMCore.Alerts.ZMQ.Sinks.EmailSink as EmailSinkMod\nfrom WMCore.Alerts.ZMQ.Sinks.EmailSink import EmailSink\n\n\n\nclass EmailSinkTest(unittest.TestCase):\n    def setUp(self):\n        self.config = ConfigSection(\"email\")\n        self.config.fromAddr = \"some@local.com\"\n        self.config.toAddr = [\"some1@local.com\", \"some2@local.com\"]\n        self.config.smtpServer = \"smtp.gov\"\n        self.config.smtpUser = None\n        self.config.smtpPass = None\n\n        # now we want to mock smtp emailing stuff - via pymox - no actual\n        # email sending to happen\n        self.mox = mox.Mox()\n        self.smtpReal = EmailSinkMod.smtplib\n        EmailSinkMod.smtplib = self.mox.CreateMock(EmailSinkMod.smtplib)\n        self.smtp = self.mox.CreateMockAnything()\n\n\n    def tearDown(self):\n        self.mox.UnsetStubs()\n        EmailSinkMod.smtplib = self.smtpReal\n\n\n    def testEmailSinkBasic(self):\n        # pre-generate the entire email message\n        subj = \"Alert from %s\" % None # this is default Alert value for HostName\n        msg = EmailSink.EMAIL_HEADER % (self.config.fromAddr, subj,\n                                        \", \".join(self.config.toAddr))\n        alerts = []\n        for i in range(10):\n            a = Alert(Source=__file__, Level = i, Timestamp = time.time(), Type = \"Test\")\n            msg += \"\\n%s\\n\" % a.toMsg()\n            alerts.append(a)\n\n        # method calls definition, ordered\n        EmailSinkMod.smtplib.SMTP(self.config.smtpServer).AndReturn(self.smtp) # 1\n        # leave for test / debugging\n        # self.smtp.sendmail('a@b.com', 'a@b.com', 'Subject: subject\\n\\nbody')\n        self.smtp.sendmail(self.config.fromAddr, self.config.toAddr, msg) # 2\n        self.smtp.quit() # 3\n\n        self.mox.ReplayAll()\n\n        sink = EmailSink(self.config) # 1\n        # leave for test / debugging\n        #self.smtp.sendmail('a@b.com', 'a@b.com', 'Subject: subject\\n\\nbody')\n        sink.send(alerts) # 2\n        del sink # 3\n\n        self.mox.VerifyAll()\n\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","sub_path":"test/python/WMCore_t/Alerts_t/ZMQ_t/Sinks_t/EmailSink_t.py","file_name":"EmailSink_t.py","file_ext":"py","file_size_in_byte":2344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} {"seq_id":"589767533","text":"from database.session import rest_api\nfrom flask_restful import Resource, reqparse, fields, marshal\nfrom database.session import db\nfrom flask import session\n\n\nclass Team(Resource):\n    _team_fields = {\n        'id': fields.Integer(attribute='Employee_ID'),\n        'username': fields.String(attribute='Username'),\n        'name': fields.String(attribute='Full_Name'),\n        'admin': fields.String(attribute='Admin')\n    }\n\n    def get(self):\n        if session.get('user') is None:\n            return {'status': 'failed', 'reason': 'not logged in'}\n\n        team = db.engine.execute(\"\"\"\n            SELECT Employee_ID, First_Name || ' ' || Last_Name as Full_Name, Username, Admin FROM Employee\n        \"\"\").fetchall()\n\n        return {'status': 'success', 'team': marshal(team, self._team_fields)}\n\n\nrest_api.add_resource(Team, '/api/team')\n","sub_path":"server/api/team.py","file_name":"team.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} {"seq_id":"546250806","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 30 23:31:56 2023\n\n@author: zhenchen\n\n@disp:  SDDP for the multi-period newsvendor, with the lost-sale variable B in the objective function;\n    \nthe longer the planning horizon, the more iterations are needed to converge;\nbackorders are allowed;\n    \n    \n\"\"\"\n\n
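# The sample_tree helpers (assumed to be a local module shipped alongside this\n# script) provide the demand sampling and scenario-tree structure that the\n# stage-wise SDDP models below are built from.\nfrom sample_tree 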
import generate_sample, get_tree_strcture, getSizeOfNestedList\nfrom gurobipy import *\nimport time\nfrom functools import reduce\nimport itertools\nimport random\nimport time\n\n\nstart = time.process_time()\nini_I = 0\nvari_cost = 1\nprice = 10\nunit_back_cost = 10\nunit_hold_cost = 2\nmean_demands = [10, 20]\nsample_nums = [10, 10]\nT = len(mean_demands)\ntrunQuantile = 0.9999 # affective to the final ordering quantity\nscenario_numTotal = reduce(lambda x, y: x * y, sample_nums, 1)\n\n# samples_detail is the detailed samples in each period\nsamples_detail = [[0 for i in range(sample_nums[t])] for t in range(T)] \nfor t in range(T):\n samples_detail[t] = generate_sample(sample_nums[t], trunQuantile, mean_demands[t])\n\n# samples_detail = [[5, 15], [5, 15], [5, 15]]\nscenarios = list(itertools.product(*samples_detail)) \nsample_num = 30\nsamples= random.sample(scenarios, sample_num) # sampling without replacement\nsamples.sort() # sort to make same numbers together\nnode_values, node_index = get_tree_strcture(samples)\n\ntheta_iniValue = -300 # initial theta values in each period\nm = Model() # linear model in the first stage\n# decision variable in the first stage model\nq = m.addVar(vtype = GRB.CONTINUOUS, name = 'q_1')\ntheta = m.addVar(lb = theta_iniValue*T, vtype = GRB.CONTINUOUS, name = 'theta_2')\n\n# number of nodes in each period\nt_nodeNum = [0 for i in range(T)]\nfor t in range(T):\n t_nodeNum[t] = getSizeOfNestedList(node_values[t])\n# decision variables from stage 2 to stage T+1\nm_sub = [[Model() for j in range(t_nodeNum[t])] for t in range(T)] \nq_sub = [[m_sub[t][j].addVar(vtype = GRB.CONTINUOUS, name = 'q_' + str(t+2) + '^' + str(j+1)) for j in range(t_nodeNum[t])] for t in range(T-1)]\nI_sub = [[m_sub[t][j].addVar(vtype = GRB.CONTINUOUS, name = 'I_' + str(t+1) + '^' + str(j+1)) for j in range(t_nodeNum[t])] for t in range(T)]\nB_sub = [[m_sub[t][j].addVar(vtype = GRB.CONTINUOUS, name = 'B_' + str(t+1) + '^' + str(j+1)) for j in range(t_nodeNum[t])] for t in range(T)]\ntheta_sub = [[m_sub[t][j].addVar(lb = -GRB.INFINITY, vtype = GRB.CONTINUOUS, name = 'theta_' + str(t+3) + '^' + str(j+1)) for j in range(t_nodeNum[t])] for t in range(T-1)]\n\niter = 1\niter_num = 15\npi_sub_detail_values = [[[[] for s in range(t_nodeNum[t])] for t in range(T)] for iter in range(iter_num)] \nrhs_sub_detail_values = [[[[] for s in range(t_nodeNum[t])] for t in range(T)] for iter in range(iter_num)] \nq_detail_values = [[[] for t in range(T)] for iter in range(iter_num)] \nfor i in range(iter_num):\n for t in range(T):\n if t == 0:\n q_detail_values[i][t] = 0\n else:\n q_detail_values[i][t] = [0 for s in range(t_nodeNum[t-1])]\n\nwhile iter <= iter_num: \n \n # forward computation \n # solve the first stage model \n m.setObjective(vari_cost*q + theta, GRB.MINIMIZE)\n m.update()\n m.optimize()\n m.write('iter' + str(iter) + '_main.lp')\n m.write('iter' + str(iter) + '_main.sol')\n \n print(end = '')\n q_value = q.x\n q_detail_values[iter - 1][0] = q_value\n theta_value = theta.x\n z = m.objVal\n \n I_sub_values = [[0 for s in range(t_nodeNum[t])] for t in range(T)] \n B_sub_values = [[0 for s in range(t_nodeNum[t])] for t in range(T)] \n pi_sub_values = [[0 for s in range(t_nodeNum[t])] for t in range(T)] \n pi_rhs_values = [[0 for s in range(t_nodeNum[t])] for t in range(T)] \n d_sub_values = [[0 for s in range(t_nodeNum[t])] for t in range(T)]\n pi_rhs_values = [[0 for s in range(t_nodeNum[t])] for t in range(T)] \n # forward and backward \n for t in range(T): \n for j in 
range(t_nodeNum[t]): \n obj = [0.0 for i in range(t_nodeNum[t])] \n index = node_index[t][j][0]\n demand = samples[index][t]\n if t == 0: \n if T > 1:\n m_sub[t][j].setObjective(vari_cost*q_sub[t][j] + unit_hold_cost*I_sub[t][j] - price*(demand - B_sub[t][j]) +theta_sub[t][j], GRB.MINIMIZE)\n m_sub[t][j].addConstr(theta_sub[t][j] >= theta_iniValue*(T-1-t))\n else:\n m_sub[t][j].setObjective(unit_hold_cost*I_sub[t][j] - price*(demand - B_sub[t][j]), GRB.MINIMIZE)\n m_sub[t][j].addConstr(I_sub[t][j] - B_sub[t][j] == ini_I + q_value - demand)\n print('') \n else:\n if t == T - 1: \n m_sub[t][j].setObjective(unit_hold_cost*I_sub[t][j] - price*(demand - B_sub[t][j]), GRB.MINIMIZE)\n else:\n m_sub[t][j].setObjective(vari_cost*q_sub[t][j] + unit_hold_cost*I_sub[t][j] - price*(demand - B_sub[t][j]) +theta_sub[t][j], GRB.MINIMIZE)\n m_sub[t][j].addConstr(theta_sub[t][j] >= theta_iniValue*(T-1-t))\n last_index = 0\n for k in node_index[t - 1]:\n if node_index[t][j][0] in k:\n last_index = node_index[t - 1].index(k)\n m_sub[t][j].addConstr(I_sub[t][j] - B_sub[t][j] == I_sub_values[t-1][last_index] - B_sub_values[t-1][last_index] + q_detail_values[iter-1][t][last_index] - demand)\n print(end = '')\n \n # optimize\n m_sub[t][j].optimize()\n # m_sub[t][j].write('iter' + str(iter) + '_sub_' + str(t+1) + '^' + str(j+1) + '.lp')\n # m_sub[t][j].write('iter' + str(iter) + '_sub_' + str(t+1) + '^' + str(j+1) + '.dlp')\n # m_sub[t][j].write('iter' + str(iter) + '_sub_' + str(t+1) + '^' + str(j+1) + '.sol')\n obj[j] = m_sub[t][j].objVal\n if t < T - 1: \n q_detail_values[iter - 1][t+1][j] = q_sub[t][j].x\n \n I_sub_values[t][j] = I_sub[t][j].x \n B_sub_values[t][j] = B_sub[t][j].x\n pi = m_sub[t][j].getAttr(GRB.Attr.Pi)\n pi_sub_detail_values[iter-1][t][j] = pi\n rhs = m_sub[t][j].getAttr(GRB.Attr.RHS)\n rhs_sub_detail_values[iter-1][t][j] = rhs\n \n if iter == 2:\n pass\n # if t < T - 1:\n num_con = len(pi)\n for k in range(num_con - 1): # all the previous constraints\n pi_rhs_values[t][j] += pi[k]*rhs[k] # should not include the inventory flow constrints (q inside)\n pi_rhs_values[t][j] += -pi[-1] * demand - price*demand # the inventory flow constraints\n # else:\n # pi_rhs_values[t][j] = -pi[-1] * demand - price*demand\n pi_sub_values[t][j] = pi[-1]\n d_sub_values[t][j] = demand\n # so hyperplane cuts are always in the front\n m_sub[t][j].remove(m_sub[t][j].getConstrs()[-1]) # inventory flow\n if t < T - 1:\n m_sub[t][j].remove(m_sub[t][j].getConstrs()[-2]) # theta bound constraint\n \n # get and add the cut \n # very important\n if iter == 2:\n pass\n avg_pi = sum(pi_sub_values[t]) / t_nodeNum[t]\n sum_pi_rhs = 0\n for j in range(t_nodeNum[t]): \n sum_pi_rhs += pi_rhs_values[t][j]\n avg_pi_rhs = sum_pi_rhs / t_nodeNum[t]\n if t == 0:\n # should have more\n m.addConstr(theta >= avg_pi*q + avg_pi_rhs) # just the benders optimality cut, same as the above constraint\n # m.write('test.lp')\n print(end='')\n else:\n for j in range(t_nodeNum[t-1]): \n m_sub[t-1][j].addConstr(theta_sub[t-1][j] >= avg_pi*(I_sub[t-1][j] + q_sub[t-1][j]) + avg_pi_rhs)\n m_sub[t-1][j].update()\n print(end='')\n \n iter += 1\n\nend = time.process_time()\nprint('********************************************')\nprint('final expected total costs is %.2f' % z)\nprint('ordering Q in the first peiod is %.2f' % q_value)\ncpu_time = end - start\nprint('cpu time is %.3f s' % cpu_time)\n\n","sub_path":"linear 
programming/gurobi/NLDS/price_newsVendor.py","file_name":"price_newsVendor.py","file_ext":"py","file_size_in_byte":8217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"613666419","text":"#***************************************************************\n#\n# Developer: \n#\n# Program #: \n#\n# File Name: \n#\n# Course: COSC 1336 Programming Fundamentals I \n#\n# Due Date: \n#\n# Instructor: Fred Kumi \n#\n# Chapter: \n#\n# Description:\n# \n#\n#***************************************************************\n\nBASE_YEAR = 1903\n\n#***************************************************************\n#\n# Function: main\n# \n# Description: The main function of the program\n#\n# Parameters: None\n#\n# Returns: Nothing \n#\n#**************************************************************\ndef main():\n # Local dictionary variables\n year_dict = {}\n count_dict = {}\n\t\n developerInfo()\n\t\n # Open the file for reading\n input_file = open('Program11.txt', 'r')\n\n\n \n # End of the main function\n\n#***************************************************************\n#\n# Function: main\n# \n# Description: The main function of the program\n#\n# Parameters: None\n#\n# Returns: Nothing \n#\n#**************************************************************\ndef showResults(year_dict, count_dict):\n \n # Receive user input\n year = int(input('Enter a year in the range 1903-2019: '))\n\n # Print results\n if year == 1904 or year == 1994:\n print(\"The world series wasn't played in the year\", year)\n elif year < 1903 or year > 2019:\n print('The data for the year', year, \\\n 'is not included in our database.')\n else:\n winner = year_dict[year]\n wins = count_dict[winner]\n print('The team that won the world series in ', \\\n year, ' is the ', winner, '.', sep='')\n print('They have won the world series', wins, 'times.')\n \n # End of showResults\n\n#***************************************************************\n#\n# Function: developerInfo\n# \n# Description: Prints Programmer's information\n#\n# Parameters: None\n#\n# Returns: Nothing \n#\n#**************************************************************\ndef developerInfo():\n print('Name: ')\n print('Course: Programming Fundamentals I')\n print('Program: Eleven')\n print()\n # End of the developerInfo function\n\n# Call the main function.\nmain()\n\n","sub_path":"fkumi/COSC1336 - Programming Fundamentals I/Programming Assignments/Program 11/Program11-Template.py","file_name":"Program11-Template.py","file_ext":"py","file_size_in_byte":2448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"21348364","text":"#!/usr/bin/env python\n\n# clowder/clowder.py\n# Created by Greg Kiar on 2017-03-26.\n# Email: gkiar07@gmail.com\n# Copyright (c) 2017. All rights reserved.\n\nfrom argparse import ArgumentParser\ntry:\n from clowder import __file__, Schema, Cat, Cloud\nexcept:\n from clowder import __file__\n import Schema, Cat, Cloud\n\n\ndef clowder(docker_url, descriptor, credentials, name=None, meta=None):\n pass\n\n\ndef clowder_cli(args):\n parser = ArgumentParser(description=\"Driver for clowder - run to turn\"\n \" your application loose on the cloud!\")\n parser.add_argument(\"docker_url\", action=\"store\", help=\"The registry\"\n \" url for your Docker image. If on hub.docker.com/\"\n \"yourname/tool:tag, the url is then docker.io/...\")\n parser.add_argument(\"descriptor\", action=\"store\", help=\"A Boutiques\"\n \" descriptor for your tool. 
Note: two `custom` fields\"\n                        \" must be used: RAM and CPU, which describe resource\"\n                        \" requirements of your tool.\")\n    parser.add_argument(\"credentials\", action=\"store\", help=\"AWS credentials\"\n                        \" file. The credentials must be for an admin user.\")\n    parser.add_argument(\"--name\", \"-n\", action=\"store\", help=\"The name of your\"\n                        \" tool, if different from the Docker Hub repository.\")\n    parser.add_argument(\"--meta\", \"-m\", action=\"store\", nargs=\"+\", help=\"Any\"\n                        \" additional metadata about your tool you wish to\"\n                        \" convey in your job descriptor. Must be of the form\"\n                        \" KEY1:VALUE1 KEY2:VALUE2 etc.\")\n    inp = parser.parse_args(args)\n\n    docker_url = inp.docker_url\n    descriptor = inp.descriptor\n    credentials = inp.credentials\n    name = inp.name\n    meta = inp.meta\n\n    if meta is not None:\n        print(\"crafting the meta\")\n\n    clowder(docker_url, descriptor, credentials, name, meta)\n\n\nif __name__ == \"__main__\":\n    import sys\n    clowder_cli(sys.argv[1:])\n","sub_path":"clowder/clowder.py","file_name":"clowder.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} {"seq_id":"170824518","text":"from __future__ import unicode_literals\n\nfrom django.contrib.auth.models import User\nfrom django.db import models\n\nfrom core.models import Section\n# Create your models here.\n\nfrom rest_framework import routers, serializers, viewsets, parsers\n\nfrom rest_framework.response import Response\nfrom rest_framework.pagination import LimitOffsetPagination\n\nclass AsocFieldType(models.Model):\n\tname = models.CharField(max_length=200)\n\tt = models.CharField(max_length=10)\n\n\tdef __str__(self):\n\t\treturn self.name\n\nclass AsocForm(models.Model):\n\tname = models.CharField(max_length=200)\n\n\tdef __str__(self):\n\t\treturn self.name\n\nclass AsocFormField(models.Model):\n\tform = models.ForeignKey(AsocForm, related_name=\"fields\")\n\tlabel = models.CharField(max_length=200)\n\tfield_name = models.CharField(max_length=500)\n\tdescription = models.TextField()\n\tt = models.ForeignKey(AsocFieldType)\n\torder = models.IntegerField(default=10)\n\tt_options = models.TextField(null=True, blank=True)\n\trequired = models.BooleanField(default=True)\n\n\tdef __str__(self):\n\t\treturn str(self.form) + \" - \" + self.label\n\nclass CompiledForm(models.Model):\n\tsection = models.ForeignKey(Section)\n\tform = models.ForeignKey(AsocForm)\n\tauthor = models.ForeignKey(User, null=True, blank=True)\n\tauthor_name = models.CharField(max_length=300, null=True, blank=True)\n\tdate = models.DateTimeField(auto_now=True, null=True, blank=True)\n\tsaved = models.BooleanField(default=True)\n\tpublished = models.BooleanField(default=False)\n\n\traw = models.TextField(null=True, blank=True)\n\nclass CompiledFormField(models.Model):\n\tform = models.ForeignKey(CompiledForm, related_name=\"fields\")\n\tfield = models.ForeignKey(AsocFormField)\n\tvalue = models.CharField(max_length=2000)\n\tdate = models.DateTimeField(auto_now=True)\n\tvalid = models.BooleanField(default=True)\n\nfrom rest_framework.pagination import PageNumberPagination\nclass StandardResultsSetPagination(PageNumberPagination):\n    page_size = 25\n    page_size_query_param = 'page_size'\n    max_page_size = 100\n\n\nclass AsocFieldTypeSerializer(serializers.ModelSerializer):\n    class Meta: \n        model = AsocFieldType\n\nclass AsocFormFieldSerializer(serializers.ModelSerializer):\n    t = AsocFieldTypeSerializer()\n    \n    def to_representation(self, instance):\n
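        # t_options is stored as one option per line; split it into a list and\n        # mirror it as DRF-style {display_name, value} choices, presumably so a\n        # client can render a select widget from them.\n        ret = 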
super(AsocFormFieldSerializer, self).to_representation(instance)\n ret['t_options'] = ret['t_options'].split(\"\\r\\n\")\n ret[\"choices\"] = [{\"display_name\":c,\"value\":c} for c in ret[\"t_options\"]]\n return ret\n class Meta: \n model = AsocFormField\n \nclass AsocFieldTypeViewSet(viewsets.ModelViewSet):\n queryset = AsocFieldType.objects.all()\n serializer_class = AsocFieldTypeSerializer\n\nclass AsocFormSerializer(serializers.ModelSerializer):\n fields = AsocFormFieldSerializer(many=True)\n class Meta: \n model = AsocForm\n\nclass AsocFormViewSet(viewsets.ModelViewSet):\n queryset = AsocForm.objects.all()\n serializer_class = AsocFormSerializer\n\n\nclass AsocFormFieldViewSet(viewsets.ModelViewSet):\n queryset = AsocFormField.objects.all()\n serializer_class = AsocFormFieldSerializer\n\nclass CompiledFormFieldSerializer(serializers.HyperlinkedModelSerializer):\n class Meta: \n model = CompiledFormField\n\nclass CompiledFormSerializer(serializers.ModelSerializer):\n fields = CompiledFormFieldSerializer(many=True)\n form = AsocFormSerializer()\n class Meta: \n model = CompiledForm\n\n\nfrom rest_framework import filters\n\nclass CompiledFormViewSet(viewsets.ModelViewSet):\n queryset = CompiledForm.objects.filter(published=True)\n filter_backends = (filters.DjangoFilterBackend,)\n filter_fields = ('section','form', 'author')\n\n serializer_class = CompiledFormSerializer\n pagination_class = StandardResultsSetPagination\n\nclass CompiledFormFieldViewSet(viewsets.ModelViewSet):\n queryset = CompiledFormField.objects.all()\n serializer_class = CompiledFormFieldSerializer\n\n\n\n\nrouter = routers.DefaultRouter()\nrouter.register(\"fieldtype\", AsocFieldTypeViewSet)\nrouter.register(\"asocform\", AsocFormViewSet)\nrouter.register(\"asocformfield\", AsocFormFieldViewSet)\n\nrouter.register(\"compiledform\", CompiledFormViewSet)\nrouter.register(\"compiledformfield\", CompiledFormFieldViewSet)","sub_path":"metaform/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"382317984","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/ztfy/scheduler/zodb.py\n# Compiled at: 2012-10-22 06:18:07\n__docformat__ = 'restructuredtext'\nfrom ztfy.scheduler.interfaces import IZODBPackingTask\nfrom ztfy.utils.interfaces import IZEOConnection\nfrom zope.component import queryUtility\nfrom zope.interface import implements\nfrom zope.schema.fieldproperty import FieldProperty\nfrom ztfy.scheduler.task import BaseTask\n\nclass ZODBPackingTask(BaseTask):\n \"\"\"ZODB packing task\"\"\"\n implements(IZODBPackingTask)\n zeo_connection = FieldProperty(IZODBPackingTask['zeo_connection'])\n pack_time = FieldProperty(IZODBPackingTask['pack_time'])\n\n def run(self, report):\n zeo_connection = queryUtility(IZEOConnection, self.zeo_connection)\n if zeo_connection is None:\n report.write('No ZEO connection. 
Task aborted.')\n return\n else:\n report.write('ZEO connection name = %s\\n' % self.zeo_connection)\n report.write('Packing transactions older than %d days\\n' % self.pack_time)\n storage, db = zeo_connection.getConnection(get_storage=True)\n try:\n db.pack(days=self.pack_time)\n report.write('\\nPack successful.\\n')\n finally:\n storage.close()\n\n return","sub_path":"pycfiles/ztfy.scheduler-0.5.2-py2.7/zodb.py","file_name":"zodb.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"219114476","text":"#This python script must be on the devsecops account\n#It will check if the event coming from s3 events, also check the dynamodb table in master accont if the featureLevel is full\n#it creates\n#masteraccountid, and s3dpcmgmtrole must be added for environment variables\n\nimport json\nimport boto3\nimport logging\nfrom io import BytesIO\nfrom gzip import GzipFile\nfrom botocore.exceptions import ClientError\nfrom boto3.dynamodb.conditions import Key, Attr\nimport os\nimport re\n\ndef aws_session(role_arn=None, session_name='my_session'):\n \"\"\"\n If role_arn is given assumes a role and returns boto3 session\n otherwise return a regular session with the current IAM user/role\n \"\"\"\n if role_arn:\n print(\"starting sts\")\n client = boto3.client('sts')\n response = client.assume_role(RoleArn=role_arn, RoleSessionName=session_name)\n session = boto3.Session(\n aws_access_key_id=response['Credentials']['AccessKeyId'],\n aws_secret_access_key=response['Credentials']['SecretAccessKey'],\n aws_session_token=response['Credentials']['SessionToken'])\n return session\n else:\n return boto3.Session()\n\ndef processS3Entry(entry,list_interesting_eventnames,accountconfiguration):\n account = entry['recipientAccountId']\n confidential_kms_key_arn = accountconfiguration['confidentialKMSKey']\n if (entry['userIdentity']['type'] == 'AssumedRole'):\n regexp = re.compile('(.*)TSI_Base(.*)',re.IGNORECASE)\n print(regexp.match(entry['userIdentity']['sessionContext']['sessionIssuer']['arn']))\n\n if((regexp.match(entry['userIdentity']['sessionContext']['sessionIssuer']['arn'])) is not None) :\n return 'bailing because of recursion danger'\n ROLE_ARN = 'arn:aws:iam::' + account +':role/' + os.environ['s3dpcmgmtrole']\n session_assumed = aws_session(role_arn=ROLE_ARN, session_name='my_lambda')\n client = session_assumed.client('s3')\n try:\n tags = client.get_bucket_tagging(\n Bucket=entry['requestParameters']['bucketName']\n )\n except ClientError as e:\n #print(e.response)\n if e.response['Error']['Code'] == 'NoSuchTagSet':\n tagsetExists=False\n else:\n tagsetExists=True\n if (tagsetExists):\n for tag in tags['TagSet']:\n #We have DPC tag\n if tag['Key'] == 'DPC':\n dpcexists=True\n dpc=tag['Value']\n #tag is internal\n if ((dpc.lower() == 'internal')):\n try:\n encryption = client.get_bucket_encryption(\n Bucket=entry['requestParameters']['bucketName']\n)\n except ClientError as e:\n if e.response['Error']['Code'] == 'ServerSideEncryptionConfigurationNotFoundError':\n encryption = \"none\"\n else:\n for serversideenc in encryption['ServerSideEncryptionConfiguration']['Rules']:\n if ((serversideenc['ApplyServerSideEncryptionByDefault']['SSEAlgorithm'] != 'AES256') or (serversideenc['ApplyServerSideEncryptionByDefault']['SSEAlgorithm'] != 'aws:kms')):\n print(\"Here comes aes\")\n client.put_bucket_encryption(\n Bucket=entry['requestParameters']['bucketName'],\n ServerSideEncryptionConfiguration={\n 'Rules': [\n 
{\n                                        'ApplyServerSideEncryptionByDefault': {\n                                            'SSEAlgorithm': 'AES256'\n                                        }\n                                    },\n                                ]\n                            }\n)\n                    if ((encryption==\"none\")):\n                        print(\"Here comes aes\")\n                        client.put_bucket_encryption(\n                        Bucket=entry['requestParameters']['bucketName'],\n                        ServerSideEncryptionConfiguration={\n                            'Rules': [\n                                {\n                                    'ApplyServerSideEncryptionByDefault': {\n                                        'SSEAlgorithm': 'AES256'\n                                    }\n                                },\n                            ]\n                        }\n)\n                    result = client.get_bucket_acl(Bucket=entry['requestParameters']['bucketName'])\n                    for grants in result['Grants']:\n                        if((grants['Grantee']['Type']=='Group') and (grants['Grantee']['URI']=='http://acs.amazonaws.com/groups/global/AllUsers')):\n                            client.put_bucket_acl(ACL='private',Bucket=entry['requestParameters']['bucketName'])\n                #tag is confidential\n                elif ((dpc.lower() == 'confidential')):\n                    try:\n                        encryption = client.get_bucket_encryption(\n                        Bucket=entry['requestParameters']['bucketName']\n)\n                    except ClientError as e:\n                        if e.response['Error']['Code'] == 'ServerSideEncryptionConfigurationNotFoundError':\n                            encryption = \"none\"\n                    else:\n                        print(encryption)\n                        for serversideenc in encryption['ServerSideEncryptionConfiguration']['Rules']:\n                            if serversideenc['ApplyServerSideEncryptionByDefault']['SSEAlgorithm'] != 'aws:kms':\n                                print(\"Here comes aes\")\n                                client.put_bucket_encryption(\n                                Bucket=entry['requestParameters']['bucketName'],\n                                ServerSideEncryptionConfiguration={\n                                    'Rules': [\n                                        {\n                                            'ApplyServerSideEncryptionByDefault': {\n                                                'SSEAlgorithm': 'aws:kms',\n                                                'KMSMasterKeyID': confidential_kms_key_arn\n                                            }\n                                        },\n                                    ]\n                                }\n)\n                        if ((encryption==\"none\")):\n                            print(\"Here comes aes\")\n                            client.put_bucket_encryption(\n                            Bucket=entry['requestParameters']['bucketName'],\n                            ServerSideEncryptionConfiguration={\n                                'Rules': [\n                                    {\n                                        'ApplyServerSideEncryptionByDefault': {\n                                            'SSEAlgorithm': 'aws:kms',\n                                            'KMSMasterKeyID': confidential_kms_key_arn\n                                        }\n                                    },\n                                ]\n                            }\n)\n                        result = client.get_bucket_acl(Bucket=entry['requestParameters']['bucketName'])\n                        for grants in result['Grants']:\n                            if((grants['Grantee']['Type']=='Group') and (grants['Grantee']['URI']=='http://acs.amazonaws.com/groups/global/AllUsers')):\n                                client.put_bucket_acl(ACL='private',Bucket=entry['requestParameters']['bucketName'])\n                #tag is open so can be public\n                elif (dpc.lower() == 'public'):\n                    result = client.get_bucket_acl(Bucket=entry['requestParameters']['bucketName'])\n                    for grants in result['Grants']:\n                        if((grants['Grantee']['Type']=='Group') and (grants['Grantee']['URI']=='http://acs.amazonaws.com/groups/global/AllUsers')):\n                            print(\"THIS IS FULLY OPEN!\")\n                elif (dpc == 'open'):\n                    result = client.get_bucket_acl(Bucket=entry['requestParameters']['bucketName'])\n                    try:\n                        encryption = client.get_bucket_encryption(\n                        Bucket=entry['requestParameters']['bucketName']\n)\n                    except ClientError as e:\n                        #print(e.response)\n                        print(\"No encryption found and it's okay\")\n                    else:\n                        print(\"It's encrypted, leaving it as is\")\n                    for grants in result['Grants']:\n                        if((grants['Grantee']['Type']=='Group') and (grants['Grantee']['URI']=='http://acs.amazonaws.com/groups/global/AllUsers')):\n                            client.put_bucket_acl(ACL='private',Bucket=entry['requestParameters']['bucketName'])\n                #dpc is not internal confidential or open\n                else:\n                    print(\"neither\")\n                    encryption = client.get_bucket_encryption(\n                    Bucket=entry['requestParameters']['bucketName']\n)\n                    for rules in encryption['ServerSideEncryptionConfiguration']['Rules']:\n                        if rules['ApplyServerSideEncryptionByDefault']['SSEAlgorithm'] != \"AES256\":\n
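                            # Unrecognized DPC value: fall back to the safe\n                            # default of AES256 encryption; the bucket is then\n                            # made private and its DPC tag reset to Internal below.\n                            client.put_bucket_encryption(\n                            Bucket=entry['requestParameters']['bucketName'],\n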
ServerSideEncryptionConfiguration={\n 'Rules': [\n {\n 'ApplyServerSideEncryptionByDefault': {\n 'SSEAlgorithm': 'AES256'\n }\n },\n ]\n }\n)\n client.put_bucket_acl(ACL='private',Bucket=entry['requestParameters']['bucketName'])\n buckettags = client.get_bucket_tagging(Bucket=entry['requestParameters']['bucketName'])\n for idx, tagitem in enumerate(buckettags['TagSet']):\n if tagitem['Key'] == 'DPC':\n buckettags['TagSet'][idx]['Value'] = 'Internal'\n client.put_bucket_tagging(Bucket=entry['requestParameters']['bucketName'],\n Tagging={ 'TagSet' : buckettags['TagSet'] }\n)\n if (not dpcexists):\n print(\"No DPC tagset adding private acl, and dpc tagset to the bucket\")\n client.put_bucket_encryption(\n Bucket=entry['requestParameters']['bucketName'],\n ServerSideEncryptionConfiguration={\n 'Rules': [\n {\n 'ApplyServerSideEncryptionByDefault': {\n 'SSEAlgorithm': 'AES256'\n }\n },\n ]\n }\n)\n\n client.put_bucket_acl(ACL='private',Bucket=entry['requestParameters']['bucketName'])\n buckettags = client.get_bucket_tagging(Bucket=entry['requestParameters']['bucketName'])\n #print(buckettags)\n buckettags['TagSet'].append({\n 'Key': 'DPC',\n 'Value': 'Internal'\n })\n client.put_bucket_tagging(Bucket=entry['requestParameters']['bucketName'],\n Tagging= { 'TagSet' : buckettags['TagSet'] }\n)\n else:\n print(\"No tagset adding one to the bucket\")\n client.put_bucket_encryption(\n Bucket=entry['requestParameters']['bucketName'],\n ServerSideEncryptionConfiguration={\n 'Rules': [\n {\n 'ApplyServerSideEncryptionByDefault': {\n 'SSEAlgorithm': 'AES256'\n }\n },\n ]\n }\n)\n\n client.put_bucket_acl(ACL='private',Bucket=entry['requestParameters']['bucketName'])\n client.put_bucket_tagging(Bucket=entry['requestParameters']['bucketName'],\n Tagging={\n 'TagSet': [\n {\n 'Key': 'DPC',\n 'Value': 'Internal'\n },\n ]\n }\n)\n\n\n\n\n\ndef getAccountConfiguration(s3accountid):\n ROLE_ARN = 'arn:aws:iam::' + os.environ['masteraccountid'] + ':role/TSI_Base_DynamoDB_Read'\n print(ROLE_ARN)\n session_assumed = aws_session(role_arn=ROLE_ARN, session_name='dynamoquery')\n print(session_assumed)\n client = session_assumed.resource('dynamodb',region_name='eu-central-1')\n accounts_table = client.Table('accounts')\n result = accounts_table.query(KeyConditionExpression=Key('accountId').eq(s3accountid))\n print(result)\n return(result['Items'][0])\n\ndef lambda_handler(event, context):\n accountconfiguration = getAccountConfiguration(event['recipientAccountId'])\n if(accountconfiguration['featureLevel']=='full'):\n list_interesting_eventnames= [\"DeleteBucketCors\", \"DeleteBucketTagging\", \"CreateBucket\", \"PutBucketAcl\",\"PutBucketCors\",\"PutBucketPolicy\",\"PutBucketTagging\", \"PutBucketWebsite\",\"DeleteBucketEncryption\" ]\n processS3Entry(event,list_interesting_eventnames,accountconfiguration)\n else:\n print(\"Advanced features are disabled in dynamodb\")\n\n\n","sub_path":"Virago/src/lambda/cloudtrailS3DPC/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":11894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"52126274","text":"# -*- coding: utf-8 -*-\r\n\r\nimport sys\r\nimport re\r\nimport os\r\nimport urllib2\r\nimport ssl\r\nimport xml.etree.ElementTree as ET\r\n\r\nBASE_URL = \"http://10.244.172.97:8082/UTMSWebService2/test-runs/USD/Midrange/\"\r\n\r\ndef main():\r\n runIds,howRun = getRunIds()\r\n if(runIds is not None):\r\n print(\"Run IDs are: \"+str(runIds))\r\n for runId in runIds:\r\n print(\"get 
howrun of run id: \"+runId)\r\n #howRunEx = getHowRun(runId)\r\n #print(\"howrun is: \"+howRunEx)\r\n #setHowRun(runId,howRun)\r\n break\r\n else:\r\n print(\"No run id found.\")\r\n\r\ndef getRunIds():\r\n preFile = './Pre_TestSet---0.html'\r\n runIds = []\r\n howRun = 'Automated-Jenkins-ADEV'\r\n\r\n if(not os.path.isfile(preFile)):\r\n # try AX job\r\n jobName = str(sys.argv[1])\r\n buildNum = str(sys.argv[2])\r\n consoleUrl = 'https://128.222.8.83:8080/job/'+jobName+'/'+buildNum+'/logText/progressiveText?start=0'\r\n context = ssl._create_unverified_context()\r\n f = urllib2.urlopen(consoleUrl,timeout=10,context=context)\r\n preUrl = None\r\n for line in f:\r\n axLog = re.search(r'^Log\\s+:\\s+(.*)Main_Rollup---0\\.html',line)\r\n if(axLog):\r\n preUrl = axLog.group(1)+'Pre_TestSet---0.html'\r\n break\r\n\r\n if(preUrl is not None):\r\n f = urllib2.urlopen(preUrl,timeout=10)\r\n runIds = re.findall(r'Getting TestCase Steps from testrun with id:(\\d+)<',f.read())\r\n howRun = 'Automated-Jenkins-AX'\r\n\r\n else:\r\n with open(preFile,'r') as f:\r\n runIds = re.findall(r'Getting TestCase Steps from testrun with id:(\\d+)<',f.read())\r\n\r\n return (runIds,howRun)\r\n\r\ndef getHowRun(runId):\r\n url = BASE_URL + runId\r\n f = urllib2.urlopen(url,timeout=10)\r\n if(str(f.getcode()) != '200'):\r\n print(\"Failed to get howrun: \"+url)\r\n\r\n root = ET.fromstring(f.read())\r\n if(root is None):\r\n return None\r\n fields = root.find('Fields')\r\n if(fields is None):\r\n return None\r\n for field in fields.findall('Field'):\r\n howRunField = field.get('Name')\r\n if(howRunField == 'user-template-51'):\r\n return field[0].text\r\n return None\r\n\r\ndef setHowRun(runId,value):\r\n url = BASE_URL + runId\r\n field = 'user-template-51'\r\n data = '\\\r\n \\\r\n '+value+''\r\n\r\n req = urllib2.Request(url,data=data)\r\n req.get_method = lambda: \"PUT\"\r\n f = urllib2.urlopen(req)\r\n if(str(f.getcode()) != '200'):\r\n print(\"Failed to set howrun: \"+url)\r\n\r\n print(\"howrun is set to: \"+value)\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"post_build.py","file_name":"post_build.py","file_ext":"py","file_size_in_byte":2792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"498064669","text":"# Course: \n# Author: \n# Assignment: \n# Description:\n\n\nclass UndirectedGraph:\n \"\"\"\n Class to implement undirected graph\n - duplicate edges not allowed\n - loops not allowed\n - no edge weights\n - vertex names are strings\n \"\"\"\n\n def __init__(self, start_edges=None):\n \"\"\"\n Store graph info as adjacency list\n DO NOT CHANGE THIS METHOD IN ANY WAY\n \"\"\"\n self.adj_list = dict()\n\n # populate graph with initial vertices and edges (if provided)\n # before using, implement add_vertex() and add_edge() methods\n if start_edges is not None:\n for u, v in start_edges:\n self.add_edge(u, v)\n\n def __str__(self):\n \"\"\"\n Return content of the graph in human-readable form\n DO NOT CHANGE THIS METHOD IN ANY WAY\n \"\"\"\n out = [f'{v}: {self.adj_list[v]}' for v in self.adj_list]\n out = '\\n '.join(out)\n if len(out) < 70:\n out = out.replace('\\n ', ', ')\n return f'GRAPH: {{{out}}}'\n return f'GRAPH: {{\\n {out}}}'\n\n # ------------------------------------------------------------------ #\n\n def add_vertex(self, v: str) -> None:\n \"\"\"\n parameters:\n v(str): vertex to be added to the graph\n\n returns:\n none\n\n functionality:\n adds specified vertex to graph if not existing there already\n \"\"\"\n if v not 
in self.adj_list:\n self.adj_list[v] = []\n\n\n def add_edge(self, u: str, v: str) -> None:\n \"\"\"\n parameters:\n u(str): vertex 1\n v(str): vertex 2\n\n returns:\n none\n\n functionality:\n ensures that by the end of execution, both vertices exist, and are connected by an edge unless\n vertices are the same\n \"\"\"\n if u == v:\n return\n\n if u not in self.adj_list:\n self.add_vertex(u)\n\n if v not in self.adj_list:\n self.add_vertex(v)\n\n # add u to adjacent neighbors of v\n if u not in self.adj_list[v]:\n self.adj_list[v].append(u)\n\n # add v to adjacent neighbors of u\n if v not in self.adj_list[u]:\n self.adj_list[u].append(v)\n\n\n def remove_edge(self, v: str, u: str) -> None:\n \"\"\"\n parameters:\n v(str): vertex 1\n u(str): vertex 2\n\n returns:\n none\n\n functionality:\n removes connection (edge) between specified vertices if edge exists\n \"\"\"\n\n if v in self.adj_list and u in self.adj_list:\n if u in self.adj_list[v]:\n # if it exists in one, it exists in the other\n self.adj_list[v].remove(u)\n self.adj_list[u].remove(v)\n\n def remove_vertex(self, v: str) -> None:\n \"\"\"\n parameters:\n v(str): vertex to remove\n\n returns:\n none\n\n functionality:\n removes vertex and all edges connected to it from the graph\n \"\"\"\n\n if v in self.adj_list:\n neighbors = self.adj_list[v]\n\n else:\n return\n\n for neighbor in neighbors:\n # remove v from each of v's neighbor's neighbor-list\n self.adj_list[neighbor].remove(v)\n\n # finally remove the vertex from the dictionary of vertices\n self.adj_list.pop(v, None)\n\n def get_vertices(self) -> []:\n \"\"\"\n parameters:\n none\n\n returns:\n list of all vertices within the graph\n\n functionality:\n gathers all vertices from graph in no particular order\n \"\"\"\n\n return list(self.adj_list.keys())\n\n def get_edges(self) -> []:\n \"\"\"\n parameters:\n none\n\n returns:\n list of all edges in the graph\n\n functionality:\n returns a list of all edges in the graph in no particular order\n \"\"\"\n edges = []\n\n added = {}\n\n for vertex in self.adj_list:\n\n for neighbor in self.adj_list[vertex]:\n # make sure we don't include duplicate edges since (A, C) = (C, A) in an undirected graph\n if (vertex, neighbor) not in added and (neighbor, vertex) not in added:\n edges.append((vertex, neighbor))\n added[(vertex, neighbor)] = True\n\n return edges\n \n\n def is_valid_path(self, path: []) -> bool:\n \"\"\"\n parameters:\n path(python list): a sequence of vertices\n\n returns:\n boolean value describing whether or not sequence given is a path\n\n functionality:\n Determines whether specified sequence is a path in the graph\n\n An empty path is valid\n \"\"\"\n path_size = len(path)\n\n if path_size == 0:\n return True\n\n is_path = True\n path_ind = 0\n while is_path and path_ind < path_size:\n curr_vertex = path[path_ind]\n # if vertex not in graph, not a possible path!\n if curr_vertex not in self.adj_list:\n return False\n if path_ind + 1 < path_size:\n next_vertex = path[path_ind+1]\n # at this point if we are on the last vertex then we have found a path here from vertex 1\n else:\n return True\n curr_vertex_neighbors = self.adj_list[curr_vertex]\n # check to see if the next vertex in the path is a neighbor of the current one, return once\n # this is not the case, else keep checking\n if next_vertex not in curr_vertex_neighbors:\n return False\n\n path_ind += 1\n\n\n def dfs(self, v_start, v_end=None) -> []:\n \"\"\"\n parameters:\n v_start(str): starting vertex\n v_end(str): optional ending vertex\n 
\n        returns:\n            list containing all visited vertices from the depth-first-search\n\n        functionality:\n            performs a DFS on the graph starting at the specified index and returns a list\n            of all visited vertices\n        \"\"\"\n\n        to_visit_stack = []\n        visited_stack = []\n\n        if v_start not in self.adj_list:\n            return visited_stack\n\n        to_visit_stack.append(v_start)\n        to_visit_stack_len = 1\n\n        # while we still need to visit vertices continue the traversal\n        while to_visit_stack_len > 0:\n            curr_vertex = to_visit_stack.pop()\n            to_visit_stack_len -= 1\n            # only process non-visited vertices\n            if curr_vertex not in visited_stack:\n                visited_stack.append(curr_vertex)\n                # if an end is specified, this is where it comes to play\n                if curr_vertex == v_end:\n                    return visited_stack\n                # sort in reverse order so that the stack has the lowest values on top\n                curr_vertex_neighbors = sorted(self.adj_list[curr_vertex], reverse=True)\n                # descend from highest to lowest, appending the lowest values to the top of the stack\n                for neighbor in curr_vertex_neighbors:\n                    to_visit_stack.append(neighbor)\n                    to_visit_stack_len += 1\n\n        return visited_stack\n\n    def bfs(self, v_start, v_end=None) -> []:\n        \"\"\"\n        parameters:\n            v_start(str): starting vertex of BFS\n            v_end(str) OPTIONAL: ending vertex of BFS\n\n        returns:\n            list of all vertices visited\n\n        functionality:\n            returns in the order visited all vertices travelled to from start to end if end is reached\n        \"\"\"\n        to_visit_queue = []\n        visited_stack = []\n\n        if v_start not in self.adj_list:\n            return visited_stack\n\n        to_visit_queue.append(v_start)\n        to_visit_queue_len = 1\n\n        while to_visit_queue_len > 0:\n            # dequeue\n            curr_vertex = to_visit_queue.pop(0)\n            to_visit_queue_len -= 1\n\n            if curr_vertex not in visited_stack:\n                visited_stack.append(curr_vertex)\n                # if an end is specified, this is where it comes to play\n                if curr_vertex == v_end:\n                    return visited_stack\n                # sort in regular ascending order so that lower values are dequeued first\n                curr_vertex_neighbors = sorted(self.adj_list[curr_vertex])\n                # enqueue neighbors in ascending order so the lowest values are visited first\n                for neighbor in curr_vertex_neighbors:\n                    to_visit_queue.append(neighbor)\n                    to_visit_queue_len += 1\n\n        return visited_stack\n\n    def count_connected_components(self)->int:\n        \"\"\"\n        parameters:\n            none\n\n        returns:\n            number of connected components in the graph\n\n        functionality:\n            finds and counts all connected components in the graph\n\n            all vertices travelled to via a DFS/BFS produces a connected component, I use\n            this property to count all unique DFS connected components\n        \"\"\"\n\n        # the idea here is that with DFS, it returns a list of all visited vertices\n        # therefore, it contains a connected component\n\n        # so any accounted for vertex, or a vertex found from a DFS, will not need to be\n        # checked to find a potential connected component\n        component_count = 0\n        accounted_for = {}\n        for vertex in self.adj_list:\n            # increment components each time an unaccounted for vertex is used as a DFS start\n            if vertex not in accounted_for:\n                component = self.dfs(vertex)\n                component_count += 1\n\n                for link in component:\n                    accounted_for[link] = True\n\n        return component_count\n\n    def has_cycle(self)->bool:\n        \"\"\"\n        parameters:\n            none\n\n        returns:\n            boolean value indicating whether or not graph contains a cycle\n\n        functionality:\n            determines whether or not graph is acyclic\n\n            for a cycle to exist in a 
graph, it must have at least as many edges as it does vertices\n if I keep track of unique edges within each connected component, I can determine whether\n or not this statement is True or False\n \"\"\"\n accounted_for = {}\n\n # loop through each unique connected component (subgraph)\n for vertex in self.adj_list:\n # initialize edge count, to_visit and visited stack for each connected component\n edge_count = 0\n to_visit_stack = []\n visited_stack = []\n edges = {}\n\n to_visit_stack.append(vertex)\n to_visit_stack_len = 1\n\n # increment components each time an unaccounted for vertex is used as a DFS start\n if vertex not in accounted_for:\n\n # while we still need to visit vertices continue the traversal\n while to_visit_stack_len > 0:\n curr_vertex = to_visit_stack.pop()\n to_visit_stack_len -= 1\n # only process non-visited vertices\n if curr_vertex not in visited_stack:\n visited_stack.append(curr_vertex)\n # sort in reverse order so that the stack has the lowest values on top\n curr_vertex_neighbors = sorted(self.adj_list[curr_vertex], reverse=True)\n # descends from highest to lowest, appending lowest values top top of stack\n for neighbor in curr_vertex_neighbors:\n # account for unique edges\n if (curr_vertex, neighbor) not in edges and (neighbor, curr_vertex) not in edges:\n edge_count += 1\n edges[(curr_vertex, neighbor)] = True\n to_visit_stack.append(neighbor)\n to_visit_stack_len += 1\n\n for link in visited_stack:\n accounted_for[link] = True\n\n # this is the minimum size subgraph needed for a cycle to exist\n if (edge_count >= 3) and edge_count >= len(visited_stack):\n return True\n\n return False\n\nif __name__ == '__main__':\n\n print(\"\\nPDF - method add_vertex() / add_edge example 1\")\n print(\"----------------------------------------------\")\n g = UndirectedGraph()\n print(g)\n\n for v in 'ABCDE':\n g.add_vertex(v)\n print(g)\n\n g.add_vertex('A')\n print(g)\n\n for u, v in ['AB', 'AC', 'BC', 'BD', 'CD', 'CE', 'DE', ('B', 'C')]:\n g.add_edge(u, v)\n print(g)\n\n\n print(\"\\nPDF - method remove_edge() / remove_vertex example 1\")\n print(\"----------------------------------------------------\")\n g = UndirectedGraph(['AB', 'AC', 'BC', 'BD', 'CD', 'CE', 'DE'])\n g.remove_vertex('DOES NOT EXIST')\n g.remove_edge('A', 'B')\n g.remove_edge('X', 'B')\n print(g)\n g.remove_vertex('D')\n print(g)\n\n\n print(\"\\nPDF - method get_vertices() / get_edges() example 1\")\n print(\"---------------------------------------------------\")\n g = UndirectedGraph()\n print(g.get_edges(), g.get_vertices(), sep='\\n')\n g = UndirectedGraph(['AB', 'AC', 'BC', 'BD', 'CD', 'CE'])\n print(g.get_edges(), g.get_vertices(), sep='\\n')\n\n\n print(\"\\nPDF - method is_valid_path() example 1\")\n print(\"--------------------------------------\")\n g = UndirectedGraph(['AB', 'AC', 'BC', 'BD', 'CD', 'CE', 'DE'])\n test_cases = ['ABC', 'ADE', 'ECABDCBE', 'ACDECB', '', 'D', 'Z']\n for path in test_cases:\n print(list(path), g.is_valid_path(list(path)))\n\n\n print(\"\\nPDF - method dfs() and bfs() example 1\")\n print(\"--------------------------------------\")\n edges = ['AE', 'AC', 'BE', 'CE', 'CD', 'CB', 'BD', 'ED', 'BH', 'QG', 'FG']\n g = UndirectedGraph(edges)\n test_cases = 'ABCDEGH'\n for case in test_cases:\n print(f'{case} DFS:{g.dfs(case)} BFS:{g.bfs(case)}')\n print('-----')\n for i in range(1, len(test_cases)):\n v1, v2 = test_cases[i], test_cases[-1 - i]\n print(f'{v1}-{v2} DFS:{g.dfs(v1, v2)} BFS:{g.bfs(v1, v2)}')\n\n\n print(\"\\nPDF - method count_connected_components() example 
1\")\n print(\"---------------------------------------------------\")\n edges = ['AE', 'AC', 'BE', 'CE', 'CD', 'CB', 'BD', 'ED', 'BH', 'QG', 'FG']\n g = UndirectedGraph(edges)\n test_cases = (\n 'add QH', 'remove FG', 'remove GQ', 'remove HQ',\n 'remove AE', 'remove CA', 'remove EB', 'remove CE', 'remove DE',\n 'remove BC', 'add EA', 'add EF', 'add GQ', 'add AC', 'add DQ',\n 'add EG', 'add QH', 'remove CD', 'remove BD', 'remove QG')\n for case in test_cases:\n command, edge = case.split()\n u, v = edge\n g.add_edge(u, v) if command == 'add' else g.remove_edge(u, v)\n print(g.count_connected_components(), end=' ')\n print()\n\n\n print(\"\\nPDF - method has_cycle() example 1\")\n print(\"----------------------------------\")\n edges = ['AE', 'AC', 'BE', 'CE', 'CD', 'CB', 'BD', 'ED', 'BH', 'QG', 'FG']\n g = UndirectedGraph(edges)\n test_cases = (\n 'add QH', 'remove FG', 'remove GQ', 'remove HQ',\n 'remove AE', 'remove CA', 'remove EB', 'remove CE', 'remove DE',\n 'remove BC', 'add EA', 'add EF', 'add GQ', 'add AC', 'add DQ',\n 'add EG', 'add QH', 'remove CD', 'remove BD', 'remove QG',\n 'add FG', 'remove GE')\n for case in test_cases:\n command, edge = case.split()\n u, v = edge\n g.add_edge(u, v) if command == 'add' else g.remove_edge(u, v)\n print('{:<10}'.format(case), g.has_cycle())\n","sub_path":"ud_graph.py","file_name":"ud_graph.py","file_ext":"py","file_size_in_byte":15927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"487343608","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 17 21:19:48 2020\n\n@author: fneir\n\nidea: compararar dos bases de datos\n identificadores, codigos de estaciones.\n\n\"\"\"\n#%% COMPARAR CON CR2\n\nimport pandas as pd, numpy as np\n\nfile_cr2 = 'E:/ESTACIONES_CORRECCION/BBDD/cr2_prAmon_2019/cr2_prAmon_2019.txt'\nfile_dga = 'E:/ESTACIONES_CORRECCION/DGA/DGA_1980-2020_reporte_web.xlsx'\n\ncr2 = pd.read_csv(file_cr2, sep=',', na_values=-9999, header = None,\n skiprows=( np.concatenate(([2, 8], np.arange(10,975)), axis=0) ),\n encoding='latin_1')\n\ndga = pd.read_excel(file_dga, sheet_name=0, index_col=None)\n\ncr2.info()\ncr2.head()\ndga.info()\ndga.head()\n# dga.columns = None\n\ncod_dga = list([str(np.int(cod.split('-')[0])) for cod in dga.iloc[0,1:]])\ncod_cr2 = list(cr2.iloc[0])\n\nfiltro = set(cod_cr2).intersection(cod_dga)\n\nlen(filtro)-len(cod_dga)\n\ndga.columns = list([dga.iloc[0,0]]) + ([str(np.int(cod.split('-')[0])) for cod in dga.iloc[0,1:]])\ncr2.columns = list(cr2.iloc[0])\n\ncr2v2 = cr2[filtro]\ndgav2 = dga[filtro]\n\ndgav2 = dgav2.T; dgav2.columns = (dga.iloc[:,0])\ncr2v2 = cr2v2.T; cr2v2.columns = cr2.iloc[:,0]\n\ndgav2.nombre = [nom.strip().upper() for nom in dgav2.nombre]\ncr2v2.nombre = [nom.upper() for nom in cr2v2.nombre]\n\n# este no funciono, porque los nombres no estan iguales\n# dgav2 = dgav2.sort_values(by=['nombre'])\n# cr2v2 = cr2v2.sort_values(by=['nombre'])\n\ndgav2 = dgav2.sort_values(by=['codigo_estacion'])\ncr2v2 = cr2v2.sort_values(by=['codigo_estacion'])\n\nfor count, i in enumerate(dgav2.nombre): print(dgav2.nombre[count]==cr2v2.nombre[count], dgav2.nombre[count], cr2v2.nombre[count])\n\n\n##%%\narray_dga = np.array(dgav2[dgav2.columns[-480:]].apply(pd.to_numeric))\narray_cr2 = np.array(cr2v2[cr2v2.columns[-480:]].apply(pd.to_numeric))\n\nguardar = np.copy(array_dga)\n\narray_cr2.dtype\narray_dga.dtype\n\narray_dga = np.nan_to_num(array_dga, copy=True, nan=-9999, posinf=None, neginf=None)\narray_cr2 = np.nan_to_num(array_cr2, copy=True, nan=-9999, 
posinf=None, neginf=None)\n\ncompara = np.where((array_cr2-array_dga)==0, True, False)\ncompara1 = np.array(compara, dtype=float)\n#compara1 = np.where((array_cr2-array_dga)==0, True, False)\n# compara = np.where(compara==0, True, False)\n# compara = np.where(array_dga == array_cr2, True, False)\n# compara2 = array_dga == array_cr2\n# bb = compara1==compara2\n\n# [print(i) for i in compara[::,::]]\ncuenta=0\nfor count_row, row in enumerate(compara[:]):\n for count_item, item in enumerate(row):\n if compara[count_row, count_item] == True:\n compara1[count_row, count_item] = array_dga[count_row, count_item]\n elif compara[count_row, count_item] == False:\n if array_dga[count_row, count_item] > array_cr2[count_row, count_item]:\n compara1[count_row, count_item] = array_dga[count_row, count_item]\n elif array_dga[count_row, count_item] < array_cr2[count_row, count_item]:\n compara1[count_row, count_item] = array_cr2[count_row, count_item]\n cuenta=cuenta+1\n\ncompara1[compara1<0] = np.nan\n\n\nrelleno = (dga.iloc[:,(dga.columns).isin(filtro)]).T\n\nrelleno.columns = dga.iloc[:,0]\n\nrelleno = relleno.sort_values(['codigo_estacion'])\n\nrelleno[relleno.columns[-480:]] = np.array(relleno[relleno.columns[-480:]].apply(pd.to_numeric))\n\nrelleno.fuente = 'dga_web+cr2'\n\nrelleno[relleno.columns[-480:]] = compara1\n\nrelleno = relleno.sort_values(['Estación'])\n\nrelleno = relleno.T\n\nfechas = [(i) for i in (relleno.index[-480:])]\n\n#fechas\nrelleno = relleno.reindex(['CodigoBNA', 'codigo_estacion', 'fuente', 'institucion',\n 'nombre', 'Estación', 'UTMNorte(m)', 'UTMEste(m)',\n 'LatitudS', 'LongitudW', 'lat', 'lon', 'Altitud(msnm)',\n 'Cuenca', 'SubCuenca', 'ÁreaDrenaje(km2)']+\n fechas)\n# relleno.info()\n# sumario = [relleno.iloc[16:,].describe(), relleno.iloc[16:,].min(),\n# relleno.iloc[16:,].max(),relleno.iloc[16:,].std()]\n\n# relleno.summary()\n\n\nrelleno.to_excel('D:/Desktop/DGA_CR2_1980-2019.xlsx', header=False, encoding='utf-8')\n\n#%% COMPARE WITH EROSION 2010\nimport pandas as pd, numpy as np, os\n\nos.chdir('E:/Factor_R/')\n\ner2010 = pd.read_excel('BBDD/erosion2010/pp_erosion_2010_ordenado.xlsx')\ndgacr2 = pd.read_excel('BBDD/DGA_reporte_web_1980-2019/DGA_CR2_1980-2019.xlsx')\n\ner2010[pd.isnull(er2010)] = -9999\ndgacr2[pd.isnull(dgacr2)] = -9999\n\n# data filtering.\ncod_er2010 = [str(i).strip() for i in pd.to_numeric(er2010.iloc[2,1:])]\ncod_dgacr2 = [str(i).strip() for i in pd.to_numeric(dgacr2.iloc[1,1:])]\n\nfiltro = set(cod_er2010).intersection(cod_dgacr2)\n\n# columns and filtering\ner2010.columns = ['CODIGO']+cod_er2010 # er2010.iloc[2,:]\ndgacr2.columns = ['CODIGO']+cod_dgacr2 # [i.strip() for i in dgacr2.iloc[1,:]]\n\ner2010.head()\ndgacr2.head()\n\ner2010v2 = (er2010[filtro]).T\ndgacr2v2 = (dgacr2[filtro]).T\n\ner2010v2.columns = er2010.iloc[:,0]\ndgacr2v2.columns = dgacr2.iloc[:,0]\n\ner2010v2 = er2010v2.sort_values(by=['CODIGO'])\ndgacr2v2 = dgacr2v2.sort_values(by=['CODIGO'])\n\ncomparar_nombres = (pd.DataFrame([list(er2010v2.NOMBRE==dgacr2v2.nombre), list(er2010v2.NOMBRE), list(dgacr2v2.nombre)])).T\n\n# here the time periods have to be matched.\n# erosion2010 runs from 1900 to 2010, I should have put 2019\n# dgacr2 runs from 1980 to 2019\narray_er2010v2 = np.array(er2010v2.iloc[:,-(364+8):-24].apply(pd.to_numeric))\narray_dgacr2v2 = np.array(dgacr2v2.iloc[:,16:364].apply(pd.to_numeric))\n\n# aa = er2010v2.iloc[:,-(364+8):-24]\n# bb = dgacr2v2.iloc[:,16:364]\n\ncompara = np.where((array_er2010v2 - array_dgacr2v2)==0, True, False)\ncompara1 = np.array(compara, 
dtype=float)\n\n# analyze case by case which values to change\n# if they are equal, the value is kept\n# if they differ, the erosion2010 value is kept\n# if they differ and erosion2010 is -9999, and dgacr2 exists, dgacr2 stays\n# if they differ and erosion2010 is not -9999, erosion2010 stays\ncuenta=0\nfor count_row, row in enumerate(compara[:]):\n for count_item, item in enumerate(row):\n # they are equal\n if compara[count_row, count_item] == True:\n compara1[count_row, count_item] = array_dgacr2v2[count_row, count_item]\n # they differ\n elif compara[count_row, count_item] == False:\n # differs and ero2010=-9999\n if array_dgacr2v2[count_row, count_item] == -9999:\n compara1[count_row, count_item] = array_er2010v2[count_row, count_item]\n cuenta=cuenta+1\n # differs and ero2010>-9999\n else: #array_dgacr2v2[count_row, count_item] < array_er2010v2[count_row, count_item]:\n compara1[count_row, count_item] = array_dgacr2v2[count_row, count_item]\n\n\n# replace the nan values defined as -9999 with NAN\n# compara1[compara1<0] = np.nan\n\ndgacr2v2.iloc[:,16:364] = compara1\ndgacr2v2.fuente = 'dga_web+cr2+erosion2010'\n\n# data not covered by the previous comparison; these are the ones that should\n# be added to form the complete database.\nno_compara = dgacr2[dgacr2.columns.difference(filtro, sort=False)]\n# no_compara[no_compara<0] = np.nan\n\n#keep the index (rows) the same\nno_compara.index = dgacr2v2.T.index\n\nbb = (pd.merge(left=no_compara, right=dgacr2v2.T, how='outer', left_index=True, right_index=True)).reset_index(drop=True)\n\nbb.head(20)\ndgacr2.head(20)\n\nlen(set(bb.columns.to_list()).intersection(set(dgacr2.columns.to_list())))\n\ncc = bb[(['CODIGO']+sorted((bb.columns.to_list()))[:-1])] == dgacr2[(['CODIGO']+sorted(dgacr2.columns.to_list())[:-1])]\n\nbb.to_excel('BBDD/DGA_CR2_Erosion2010_1980-2019.xlsx', header=False, encoding='utf-8')\ncc.to_excel('BBDD/DGA_CR2_Erosion2010_comparacion.xlsx', header=False, encoding='utf-8')\n# bb[bb<0] = np.nan\n\n\n\n\n# in this step we try to replace the value within the original series.\n\n\n# bb = dgacr2 == no_compara\n# aa = dgacr2v2.iloc[:,16:364] == compara1\n# er2010v2.iloc[:,-(364+8):-24].to_excel('BORRAR/er2010v2.xlsx')\n# dgacr2v2.iloc[:,16:364].to_excel('BORRAR/dgacr2v2.xlsx')\n# pd.DataFrame(compara1).to_excel('BORRAR/compara1.xlsx')\n# pd.DataFrame(no_compara).to_excel('BORRAR/no_compara.xlsx')\n\n\n\n\n# #%%\n# aa = cr2.iloc[:,cr2.where(cr2.iloc[2,1:], dga.iloc[2,1:], axis=0)]\n\n# f = list(set(cod_dga) & set(cod_cr2))\n\n\n\n# [i for i, j in zip(cod_dga, cod_cr2) if i == j]\n\n\n# # cr2 = (cr2.T)\n# #cr2.iloc[:,2] = [nomme.upper() for nomme in cr2.iloc[:,2]]\n\n# # cr2.columns = ([nomme.upper() for nomme in cr2.iloc[2]])\n# cr2.columns = list(cr2.iloc[0])\n\n# cr2.loc[cr2.where(f) f]\n# cr2.iloc[cr2.where(cr2.columns, cod_dga, axis=0)]\n\n# list(x for x in lst_df if x[\"origin\"] == 'JFK' and x[\"carrier\"] == 'B6')\n\n# type(cod_dga)\n\n\n# df.iloc[[index for index,row in df.iterrows() if row['origin'] == 'JFK' and row['carrier'] == 'B6']]\n\n# cr2.iloc[[index for index,row in cr2.iterrows() if cr2['NOMBRE'] == 'VISVIRI']]\n\n# [cr2.NOMBRE in ['VISVIRI', 'HUMALPACA']]\n# pd.\n\n# cr2.head()\n# dga.head()\n","sub_path":"comparar_bbdd.py","file_name":"comparar_bbdd.py","file_ext":"py","file_size_in_byte":8894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"338048642","text":"import numpy as np\n#Need numpy for matrix calculations\nclass HMM():\n\n def __init__(self, transmission_prob, 
emission_prob, initial_states = None):\n \"\"\"\n transmission is the probabilities P(X_t|X_[t-1]) of a given random variable X_t representing all possible states in the HMM\n , i.e. the probabilities of changing state from one state to the others.\n A T x T matrix, where T is the number of possible states.\n\n emission is the probabilities associated with P(E | X_t), i.e. the probability of observing E given the state at t.\n , i.e. the probabilities of making an observation E given a certain state. If it's raining, what is the probability we observe an umbrella or no umbrella.\n An M x T matrix, where T is the number of possible states, and M is the possible number of observations. \n In our case we have 2 states, raining or not raining, and 2 possible observations, umbrella or no umbrella.\n\n init_obs is the initial observation probabilities; if we don't know each possible observation we set it to 0.5 for each observation element.\n a 1 x M matrix, where M is all possible observation labels. \n \"\"\"\n self.transmission_prob = transmission_prob #transmission probabilities.\n\n self.emission_prob = emission_prob #emission probabilities.\n\n self.numberOfstate = transmission_prob.shape[1]\n #the number of states equals the number of different states for X, i.e. the number of columns of the transmission matrix.\n \n #an np.matrix object\n self.states_f = initial_states #this is our f value in the algorithm, set as f_0:0 to begin with. Should be transposed.\n # states[0] is the probability of it being rain, and states[1] is the probability of it being no rain.\n # Holds the previous step under calculation.\n \n self.b_messages = self.init_b_messages() # a vector where every element is one. Holds the previous step under calculation.\n\n if initial_states is None:\n #If we don't have any initial states, we have to initialize to a normalised array of equal probabilities.\n self.states_f = self.init_obs_states()\n else:\n #Check if the initial states are normalized\n if(initial_states.shape[1] != 1):\n self.states_f = self.states_f.getT()\n f_t = self.states_f.getT()\n\n if(f_t*self.states_f != 1):\n print(\"Error, initial states are not normalized, all values should sum to 1\")\n return\n\n def init_obs_states(self):\n \"\"\" Initiate the initial states; if we don't know these, we set them as being equally probable \"\"\"\n n_obsStates = self.emission_prob.shape[1] #collect the number of columns in the emission matrix, there should be one per observable event.\n onesM = np.ones(n_obsStates) #create an all-ones matrix of this size\n return np.matrix([onesM*(1/n_obsStates)]).getT() #normalize and return.\n\n def init_b_messages(self):\n \"\"\" create an nx1 matrix with only 1s \"\"\"\n return np.matrix(np.ones(self.numberOfstate)).getT()\n \n def observMatrix(self, observation):\n \"\"\" Create the observation matrix dependent on what observations we have made (observation model as matrix),\n since from the observation matrix, the diagonal corresponds to the observation of one event, column 0 is umbrella, and column 1 is no umbrella. 
\"\"\"\n\n columb_eventProb = self.emission_prob[:,observation] #return an array of elemenst we are interested in.\n #print(columb_eventProb)\n #print(columb_eventProb)\n return np.matrix(np.diag(np.ravel(columb_eventProb))) #create an observation matrix of a given observation with the event matrix.\n \"\"\"\n if observation is 0, umbrella: the observation matrix should look like [[0.9, 0],[0, 0.2]]\n if observation is 1, no umbrella: the obs_matrix should look like [[0.1 ,0],[0, 0.8]]\n \"\"\" \n\n def forward(self, observations):\n \"\"\" forward operation, calculating the forward messanges for given set of observations \"\"\"\n forward_states = [] #forward messages\n forward_states.append(np.ravel(self.states_f.getT()).tolist()) #simply hold the intial state.(used in forward-backward)\n\n for observation in np.squeeze(observations.tolist()): #itterate tru every observation. \n # f_t = O_t*T.transposed()(f_t-1).transpoed() \n state_t = (self.observMatrix(observation).getT()*self.transmission_prob.getT()) * self.states_f #the calculation\n self.states_f = self.normalizer(state_t) #normalize the array\n #print(\"we got f_t: {}\".format(self.states))\n forward_states.append(np.ravel(self.states_f.transpose()).tolist()) #store the normalized state\n self.states_f = np.matrix(forward_states[0]).getT() #set the initial states back\n return forward_states #Return list of the forward states.\n #state vector f_0:t = normalizer( f_0:t-1 T O_t ), which we can easily computet, since all 3 are matrixes\n #Itterate trou every obeservation, calculating the probable state at given time.\n \n def forward_i(self, f_prev , observation):#one itteration of forward algorithm, to match forward-backward algoritm.\n \"\"\" One itteration of the forward algoritm, but not used, since it works just as well doing them all at once. \"\"\"\n forward_state = self.normalizer((self.observMatrix(observation)*self.transmission_prob.getT())*f_prev)\n return np.ravel(forward_state).tolist() #turn matrix into list.\n\n def backward_i(self,b_prev, observation):\n b_today = (self.transmission_prob*self.observMatrix(observation)*b_prev)\n return self.normalizer(b_today)\n\n def backward(self, observations):\n \"\"\" backwards algorithm. \"\"\"\n backward_states = [] #backward messages.\n backward_states.append(np.ravel(self.b_messages).tolist()) #add our initial state of all ones.\n for i, observation in reversed(list(enumerate(np.ravel(observations).tolist()))): #itterate tru observations in reverse.\n state_t = (self.transmission_prob*self.observMatrix(observation)*self.b_messages) #perform the calculation\n self.b_messages = self.normalizer(state_t) # normalise the result\n backward_states.append(np.ravel(self.b_messages).tolist()) #add to backward messages\n self.b_messages = self.init_b_messages()\n backward_states.reverse() #need to reverse the list, bc index 0 is actually the last element at this stage, for later use.\n return backward_states\n \n def forward_backward(self, observations):\n \"\"\" Forward-Backward algorithm, calculates based on an given observation \"\"\"\n\n s_vector = [] #smooted result\n #first element is f_0.\n f_messages = self.forward(observations) #f_messages from the forward algorithm, from k=0 to t. 
\n #last element is initial b value.\n b_messages = self.backward(observations) #b_messages from the backward algorithm, from k=0 to t.\n n_obs = observations.size\n for i in range(n_obs+1):\n #Basically multiply the corresponding element in f_messages with the b_messages at the same i, \n # Since they are both represented as an array, we have to transform to an np.matrix to run a multiplication.\n # This is basically what we have to do for the matrices to multiply together to [x_i*y_i, x_j,y_j, etc]\n # We run nx1 * nxm multiplication and pick out the diagonal to a 1xn matrix. \n #sv = np.matrix( np.diag( np.matrix(f_messages[i]).getT()*np.matrix(b_messages[i]) ) ).getT() #Old method.\n sv = np.multiply(np.matrix(f_messages[i]).getT(), np.matrix(b_messages[i]).getT())\n #Add the new result to s_vector.\n s_vector.append(np.ravel(self.normalizer(sv).getT()).tolist())\n return s_vector\n\n def normalizer(self, v):\n \"\"\" Takes a vector v transposed, and normalizes it so that the sum of all possible states is 1.\n this is done by taking 1 and dividing by the sum of the unnormalized states f.\n e.g. say we have f = [[0.5645], [0.0745]], the sum of f is 0.639,\n 1/0.639 = 1.5649.\n f_normalized = 1.5649*[[0.5645], [0.0745]] = 0.8834 0.1166\n \"\"\"\n c = 1/v.sum()\n return c*v #normalize by multiplying with the normalizing constant\n\n#Part B\ntransmissions = np.matrix([[0.7, 0.3],[0.3, 0.7]]) #equal to the dynamic model\n\nobservations_B_1 = np.matrix([[0,0]]) #0 equals umbrella, 1 equals no umbrella\n\nemissions = np.matrix([[0.9, 0.1],[0.2, 0.8]]) #Emission probabilities\n\nmodel = HMM(transmissions, emissions) #create HMM model instance. We don't know the initial states, so we don't pass this one. \n#Part B 1\nprint(\"########## PART B 1 ############\")\nprint(\"Task B_1 forward states normalized:t0 to t \\n{} \".format(model.forward(observations_B_1)))\nprint(\"\\n########## PART B 2 ############\")\n#Part B 2\nobservations_B_2 = np.matrix([0,0,1,0,0]) # umbrella, umbrella, no umbrella, umbrella, umbrella.\nprint(\"\\nTask B_2 forward states normalized from state t0 to t \\n{}\".format(model.forward(observations_B_2)))\n\n#Part C 1\nobservations_C_1 = observations_B_1 # umbrella, umbrella.\nprint(\"\\n########## PART C 1 ############\")\nprint(\"Task C_1 P(X_1|e_1:2) {}\".format(model.forward_backward(observations_C_1)[1]) )\nprint(\"Task C_1 SV {}\".format(model.forward_backward(observations_C_1)) )\nprint(\"Task C_1 backward messages: \\n {}\".format(model.backward(observations_C_1)))\nprint(\"Task C_1 forward messages: \\n {}\".format(model.forward(observations_C_1)))\n#Part C 2\nobservations_C_2 = observations_B_2 # umbrella, umbrella, no umbrella, umbrella, umbrella.\nprint(\"\\n########## PART C 2 ############\")\nprint(\"Task C_2 backward messages: \\n{}\".format(model.backward(observations_C_2)) )\nprint(\"Task C_2 smoothed probability values: \\n{}\".format(model.forward_backward(observations_C_2)))\nprint(\"Task C_2 forward messages: \\n{}\".format(model.forward(observations_C_2)))","sub_path":"hiddenMarkov.py","file_name":"hiddenMarkov.py","file_ext":"py","file_size_in_byte":10112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"239170972","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\n\r\n \r\nG33L=pd.read_csv('AllG33L.csv', header=0)\r\ndf1=pd.DataFrame(G33L)\r\nG32G=pd.read_csv('AllG32G.csv', 
header=0)\r\ndf2=pd.DataFrame(G32G)\r\nDIFFD=pd.read_csv('AllDIFF.csv',header=0)\r\ndf3=pd.DataFrame(DIFFD)\r\n\r\n\r\n\r\nprint(\"This is a program to calculate the TOLERANCE AND ACTION LIMITS for the Tomotherapy machine for the abdominal, breast+SVC, head and neck and prostate treatments \\n\") \r\n\r\nselection=input(\"Calculate for the Gamma index (G) and/or the difference in dose? (D)\\n\")\r\n\r\nprint(\"Wait, the program is processing the previous data you entered \\n\")\r\n\r\nif selection == \"G\":\r\n selection2=input(\"Which action and tolerance limits do you want to know: Gamma 3% 3mm (G33L) or Gamma 3% 2mm (G32G)? \\n\")\r\n if selection2 == \"G33L\": \r\n #Number of cases for each anatomical site\r\n NG33LA=166\r\n NG33LB=165\r\n NG33LHN=115\r\n NG33LP=281\r\n \r\n #Variable calculations for abdominal\r\n for i in range (1,len(df1)+1): #read the AG33L column and run the calculation up to where values exist\r\n df1['diffa']=df1['AG33L'].diff(1) #difference \r\n df1['diffa']=df1['diffa'].abs() #absolute value\r\n #vaDif=df1['diffa'].abs()\r\n sumaDif=df1['diffa'].sum() #Sum of differences\r\n sumaCL=df1['AG33L'].sum()\r\n \r\n break\r\n \r\n #Action limit calculation\r\n beta=6\r\n mean1=df1['AG33L'].mean()\r\n st1=df1['AG33L'].std()\r\n T=1\r\n a=st1**2\r\n b=(mean1-T)**2\r\n ALA=beta*np.sqrt(a+b)\r\n ALA2=ALA/2\r\n ALATOTAL=(1-ALA2)*100\r\n print(f\"The action limit for abdominal treatments is {ALATOTAL:.2f} %\")\r\n \r\n #Tolerance limits calculation \r\n \r\n #central line\r\n CL=(1/NG33LA)*sumaCL\r\n \r\n #moving range\r\n O=1/(NG33LA-1)\r\n MR=O*sumaDif\r\n UCL=(CL+(2.66*MR))*100\r\n LCL=(CL-(2.66*MR))*100\r\n print(f\"The percentage lower tolerance limit for abdominal treatments is {LCL:.2f} %\")\r\n \r\n \r\n \r\n elif selection2 == \"G32G\":\r\n T=0\r\n print (\"Positive number\")\r\n NG32GA=71\r\n NG32GB=79\r\n NG32GHN=62\r\n NG32GP=131\r\n\r\n elif selection == \"D\":\r\n T=1 \r\n print (\"Equal to 0\")\r\n NG32GA=71\r\n NG32GB=79\r\n NG32GHN=62\r\n NG32GP=131\r\n\r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n\r\n\r\n","sub_path":"spc.py","file_name":"spc.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"491290420","text":"# String and substring user input for analysis\ns1 = input(\"Enter string you wish to analyze for substring: \\n\")\ns2 = input(\"Enter substring you wish to analyze in string: \\n\")\n\n# contains_word function\ndef contains_word(s1: str, s2: str) -> str:\n \"\"\"\n Return affirmative/negatory statement depending on if string 2 is found in string 1 (s2 and s1 respectively).\n >>> contains_word(\"noway\", \"way\")\n \"'way' has been found in 'noway'\"\n >>> contains_word(\"way\", \"noway\")\n \"'noway' has not been found in 'way'\"\n \"\"\"\n \n if s2 in s1:\n print(repr(s2) + \" has been found in \" + repr(s1))\n else:\n print(repr(s2) + \" has not been found in \" + repr(s1))\n\n#function call\ncontains_word(s1, s2)\n","sub_path":"substring_checker.py","file_name":"substring_checker.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"480344368","text":"# Copyright 2011 James McCauley\n#\n# This file is part of POX.\n#\n# POX is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later 
version.\n#\n# POX is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with POX. If not, see <http://www.gnu.org/licenses/>.\n\nfrom flex.lib.util import str_to_bool, parse_packet\nfrom flex.base.module import Module\nimport time\nfrom flex.core import core\nfrom flex.base.handler import StorageHandler\n\n\"\"\"\nAn L2 learning switch.\n\nIt is derived from one written live for an SDN crash course.\nIt is somewhat similar to NOX's pyswitch in that it installs\nexact-match rules for each flow.\n\"\"\"\n\nlogger = core.get_logger()\n\n# We don't want to flood immediately when a switch connects.\n# Can be overridden on the command line.\n_flood_delay = 0\n\nclass LearningSwitch(StorageHandler):\n \"\"\"\n The learning switch \"brain\" associated with a single OpenFlow switch.\n \n When we see a packet, we'd like to output it on a port which will\n eventually lead to the destination. To accomplish this, we build a\n table that maps addresses to ports.\n \n We populate the table by observing traffic. When we see a packet\n from some source coming from some port, we know that source is out\n that port.\n \n When we want to forward traffic, we look up the destination in our\n table. If we don't know the port, we simply send the message out\n all ports except the one it came in on. (In the presence of loops,\n this is bad!).\n \n In short, our algorithm looks like this:\n \n For each packet from the switch:\n 1) Use source address and switch port to update address/port table\n 2) Is transparent = False and either Ethertype is LLDP or the packet's\n destination address is a Bridge Filtered address?\n Yes:\n 2a) Drop packet -- don't forward link-local traffic (LLDP, 802.1x)\n DONE\n 3) Is destination multicast?\n Yes:\n 3a) Flood the packet\n DONE\n 4) Port for destination address in our address/port table?\n No:\n 4a) Flood the packet\n DONE\n 5) Is output port the same as input port?\n Yes:\n 5a) Drop packet and similar ones for a while\n 6) Install flow table entry in the switch so that this\n flow goes out the appropriate port\n 6a) Send the packet out the appropriate port\n \"\"\"\n\n def __init__ (self, connection, transparent):\n # Switch we'll be adding L2 learning switch capabilities to\n self.connection = connection\n self.transparent = transparent\n\n self.switch = core.api.switch_api\n self.listen = core.api.listen_api\n self.message = core.api.message_api\n self.action = core.api.action_api\n self.port = core.api.port_api\n self.match = core.api.match_api\n\n # Our table\n self.macToPort = {}\n# self.L2_LEARN = 'l2_learn:%s' % self.connection.get_id()\n# core.appStorage.listen_domain(self, self.L2_LEARN)\n\n # We want to hear PacketIn messages, so we listen\n # to the connection\n self.listen.add_listeners(self, connection)\n\n # We just use this to know when to log a helpful message\n self.hold_down_expired = _flood_delay == 0\n\n# logger.debug('Initializing LearningSwitch')\n\n def handle_storage(self, key, value, domain, type):\n self.macToPort[key] = value\n\n def _add_port(self, mac, port):\n core.appStorage.set(mac, port, self.L2_LEARN)\n\n def _get_port(self, mac):\n core.appStorage.get(mac, self.L2_LEARN)\n\n\n def _handle_PacketIn(self, event):\n \"\"\"\n Handle packet in messages from the switch to implement above algorithm.\n \"\"\"\n packet = 
parse_packet(event.data)\n\n def flood (message = None):\n \"\"\" Floods the packet \"\"\"\n msg = self.message.packet_out_message()\n if time.time() - self.switch.get_connect_time(self.connection) >= _flood_delay:\n # Only flood if we've been connected for a little while...\n if self.hold_down_expired is False:\n # Oh yes it is!\n self.hold_down_expired = True\n # logger.info(\"%s: Flood hold-down expired -- flooding\", str(event.switch))\n\n if message is not None: logger.debug(message)\n # log.debug(\"%i: flood %s -> %s\", event.dpid,packet.src,packet.dst)\n # OFPP_FLOOD is optional; on some switches you may need to change\n # this to OFPP_ALL.\n action = self.action.output_action(self.port.flood_port())\n msg.actions.append(action)\n else:\n pass\n logger.info(\"Holding down flood for %s\", str(event.src))\n msg.buffer_id = event.buffer_id\n msg.port = event.port\n msg.data = event.data\n self.switch.send_to(self.connection, msg)\n\n def drop (duration = None):\n \"\"\"\n Drops this packet and optionally installs a flow to continue\n dropping similar ones for a while\n \"\"\"\n if duration is not None:\n if not isinstance(duration, tuple):\n duration = (duration, duration)\n msg = self.message.flow_mod_message()\n msg.match = self.match.data_match(event.data)\n msg.idle_timeout = duration[0]\n msg.hard_timeout = duration[1]\n msg.buffer_id = event.buffer_id\n self.switch.send_to(self.connection, msg)\n elif event.buffer_id is not None:\n msg = self.message.packet_out_message()\n msg.buffer_id = event.buffer_id\n msg.port = event.port\n self.switch.send_to(self.connection, msg)\n\n self.macToPort[packet.src] = event.port # 1\n# self._add_port(packet.src, event.port)\n\n if not self.transparent: # 2\n if packet.type == packet.LLDP_TYPE or packet.dst.isBridgeFiltered():\n drop() # 2a\n return\n\n if packet.dst.is_multicast:\n flood() # 3a\n else:\n# port = self._get_port(packet.dst)\n if packet.dst not in self.macToPort: # 4\n# if port is None:\n flood(\"Port for %s unknown -- flooding\" % (packet.dst,)) # 4a\n else:\n port = self.macToPort[packet.dst]\n if port == event.port: # 5\n # 5a\n logger.warning(\"Same port for packet from %s -> %s on %s.%s. 
Drop.\"\n % (packet.src, packet.dst, event.src, port))\n drop(10)\n return\n # 6\n# logger.debug(\"installing flow for %s.%i -> %s.%i\" %\n# (packet.src, event.port, packet.dst, port))\n msg = self.message.flow_mod_message()\n msg.match = self.match.data_match(event.data, event.port)\n msg.idle_timeout = 10\n msg.hard_timeout = 30\n msg.actions.append(self.action.output_action(port = port))\n # msg.data = event.ofp # 6a\n self.switch.send_to(self.connection, msg)\n\n\nclass l2_learning (Module):\n \"\"\"\n Waits for OpenFlow switches to connect and makes them learning switches.\n \"\"\"\n def __init__ (self, transparent):\n self.transparent = transparent\n\n def start(self):\n core.api.listen_api.add_listeners(self)\n\n def _handle_ConnectionUp(self, content):\n# logger.debug(\"Switch %s connected\" % content.src)\n LearningSwitch(content.src, self.transparent)\n\n\ndef launch():\n \"\"\"\n Starts an L2 learning switch.\n \"\"\"\n transparent = core.config.get('module.l2_learning.transparent', False)\n hold_down = core.config.get('module.l2_learning.hold_down', _flood_delay)\n\n try:\n global _flood_delay\n _flood_delay = int(str(hold_down), 10)\n assert _flood_delay >= 0\n except:\n raise RuntimeError(\"Expected hold-down to be a number\")\n core.register_component(l2_learning, str_to_bool(transparent))\n\n","sub_path":"app/l2_learning.py","file_name":"l2_learning.py","file_ext":"py","file_size_in_byte":8597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"544398477","text":"\"\"\"\nConnecting to the database using psycopg2\n- Create a table\n\"\"\"\nimport psycopg2\nfrom auth import secret\n\n# establish connection to our database\nconnection = psycopg2.connect(\"dbname = example user = {}\".format(secret['username']))\n\n# open a cursor to perform database operations\ncursor = connection.cursor()\n\ncursor.execute('DROP TABLE IF EXISTS table2;')\n\ncursor.execute(\"\"\"\n CREATE TABLE table2 (\n id INTEGER PRIMARY KEY,\n completed BOOLEAN NOT NULL DEFAULT False\n );\n\"\"\")\n\n\ncursor.execute('INSERT INTO table2 (id, completed) VALUES (%s, %s);', (1, True))\n\nSQL = 'INSERT INTO table2 (id, completed) VALUES (%(id)s, %(completed)s);'\n\ndata = {\n 'id': 2,\n 'completed': False\n}\n\ncursor.execute(SQL, data)\n\ncursor.execute('SELECT * from table 2;')\n\nresult = cursor.fetchall()\nprint(result)\n\nconnection.commit()\n\ncursor.close()\nconnection.close()\n","sub_path":"psycopg_db.py","file_name":"psycopg_db.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"530858291","text":"from torch.autograd import Variable\nimport numpy as np\nimport os\nimport torch\nfrom scipy.sparse import lil_matrix\nfrom scipy.io import mmwrite\nfrom utils.utils_DEAL import *\n\nimport sys\nimport os\ncurrent_dir = os.path.dirname(os.path.abspath(\"__file__\"))\nsys.path.append( str(current_dir) + '/../../../' )\nfrom setting_param import all_node_num\nfrom setting_param import n_expanded\n\nos.environ['KMP_DUPLICATE_LIB_OK']='True' # mmwriteでエラーが出るのを回避\n\ndef inference(dataloader, net, opt, OutputDir):\n net.eval()\n lambda_list = (0.1, 0.85, 0.05)\n device = torch.device('cuda:' + str(opt.cuda) if opt.gpu else 'cpu')\n\n for i, (sample_idx, data_arrays_link, dists, ind_train_A, ind_train_X, nodes_keep, A, X, label, mask) in enumerate(dataloader, 0):\n sample_idx, data_arrays_link, dists, ind_train_A, ind_train_X, nodes_keep, A, X, label, mask = 
sample_idx[0], data_arrays_link, dists[0], ind_train_A[0], ind_train_X[0], nodes_keep[0], A[0], X[0], label[0], mask[0]\n\n if opt.cuda:\n label = label.cuda()\n mask = mask.cuda()\n target = Variable(label)\n mask = Variable(mask)\n\n # for DEAL\n A, X, A_train, X_train, data, train_ones, val_edges, test_edges, val_labels, gt_labels, nodes_keep = load_datafile(data_arrays_link, dists, ind_train_A, ind_train_X, nodes_keep, A, X, opt)\n sp_X = convert_sSp_tSp(X).to(device).to_dense().double()\n\n\n true_, pred_ = get_pred(net, test_edges, gt_labels, sp_X, nodes_keep, lambdas=lambda_list)\n\n true = np.zeros((all_node_num+n_expanded, all_node_num+n_expanded))\n true[all_node_num:] = true_.reshape(n_expanded, all_node_num+n_expanded)\n true_between_new_nodes = true[all_node_num:, all_node_num:]\n true = true + true.T\n true[all_node_num:, all_node_num:] = true_between_new_nodes\n\n pred = np.zeros((all_node_num+n_expanded, all_node_num+n_expanded))\n pred[all_node_num:] = pred_.reshape(n_expanded, all_node_num+n_expanded)\n pred_between_new_nodes = pred[all_node_num:, all_node_num:]\n pred = pred + pred.T\n pred[all_node_num:, all_node_num:] = pred_between_new_nodes\n\n #assert np.all(target.numpy().astype(np.int32) == true.astype(np.int32)) # (links of true new nodes left unassigned by the matching remain at coordinates below all_node_num, so this would detect them, but they are ignored by the mask processing, so it is fine)\n\n # save the prediction results and labels\n os.makedirs(OutputDir + \"/output\", exist_ok=True)\n p = pred\n t = target.numpy()\n m = mask.numpy()\n mmwrite(OutputDir + \"/output/pred\" + str(sample_idx.numpy()), lil_matrix(p * m)) # zero out the non-target elements and store as a sparse matrix\n mmwrite(OutputDir + \"/output/true\" + str(sample_idx.numpy()), lil_matrix(t))\n mmwrite(OutputDir + \"/output/mask\" + str(sample_idx.numpy()), lil_matrix(m))\n","sub_path":"Model/link_prediction_new/DEAL/utils/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":2970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"539491672","text":"import datetime\nfrom Google import Create_Service\nfrom googleapiclient.http import MediaFileUpload\n\nclient_secret = 'client_secrets.json'\napi_name = 'youtube'\napi_version = 'v3'\nscopes = ['https://www.googleapis.com/auth/youtube.upload']\n\nservice = Create_Service(client_secret,api_name,api_version,scopes)\n\n#upload_date_time=datetime.datetime(2020,12,25,12,30,0).isoformat() + '.000Z'\n\nrequest_body = {\n\t'snippet':{\n\t\t'categoryId':19,\n\t\t'title': 'Uploading_test',\n\t\t'description':'Description_test',\n\t\t'tags':['Travel','video_test','Travel Tips']\t\n\t},\n\t'status': {\n\t'privacyStatus':'public',\n\t#'publishAt': upload_date_time,\n\t'selfDeclaredMadeForKids' : False,\n\t},\n\t'notifySubscribers':False\n}\n\nmediafile=MediaFileUpload('hello.mp4')\n\nresponse_upload = service.videos().insert(\n\tpart = 'snippet,status',\n\tbody=request_body,\n\tmedia_body=mediafile\n).execute()\nprint(response_upload['status'])\n\nservice.thumbnails().set(\n\tvideoId = response_upload.get('id'),\n \tmedia_body = MediaFileUpload('hello_th.jpg') \t\n).execute()","sub_path":"upload_video1.py","file_name":"upload_video1.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"293832202","text":"###################################################################################################\n#\n# EventTypeIdentification.py\n#\n# Copyright (C) by Andreas Zoglauer, Amal Metha & Caitlyn Chen.\n# All rights reserved.\n#\n# Please see the 
file License.txt in the main repository for the copyright-notice.\n#\n###################################################################################################\n\n\n# TODO: Train and test all multiplicities\n# TODO: Test performance as a function of energy\n# TODO: Test performance as a function of zenith angle\n# TODO: Test deep neural networks\n# TODO: Test different libraries\n\n\n###################################################################################################\n\n\nimport ROOT\nimport array\nimport os\nimport sys\nimport time\nimport collections\nimport numpy as np\n\n\n###################################################################################################\n\n\nclass EventTypeIdentification:\n \"\"\"\n This class performs energy loss training. A typical usage would look like this:\n\n AI = EventTypeIdentification(\"Ling2.seq3.quality.root\", \"Results\", \"TF:VOXNET\", 1000000)\n AI.train()\n AI.test()\n\n \"\"\"\n\n\n###################################################################################################\n\n\n def __init__(self, FileName, Output, Algorithm, MaxEvents):\n \"\"\"\n The default constructor for class EventTypeIdentification\n\n Attributes\n ----------\n FileName : string\n Data file name (something like: X.maxhits2.eventclusterizer.root)\n OutputPrefix: string\n Output filename prefix as well as output directory name\n Algorithms: string\n The algorithms used during training. Separate multiples by comma (e.g. \"MLP,DNNCPU\")\n MaxEvents: integer\n The maximum amount of events to use\n\n \"\"\"\n\n self.FileName = FileName\n self.OutputPrefix = Output\n self.Algorithms = Algorithm\n self.MaxEvents = MaxEvents\n\n\n###################################################################################################\n\n\n def train(self):\n \"\"\"\n Switch between the various machine-learning libraries based on self.Algorithm\n \"\"\"\n\n if self.Algorithms.startswith(\"TF:\"):\n self.trainTFMethods()\n #elif self.Algorithms.startswith(\"TMVA:\"):\n # self.trainTMVAMethods()\n #elif self.Algorithms.startswith(\"SKL:\"):\n # self.trainSKLMethods()\n else:\n print(\"ERROR: Unknown algorithm: {}\".format(self.Algorithms))\n\n return\n\n\n###################################################################################################\n\n\n def loadData(self):\n \"\"\"\n Prepare numpy array datasets for scikit-learn and tensorflow models\n \n Returns:\n list: list of the event types in numerical form: 1x: Compton event, 2x pair event, with x the detector (0: passive material, 1: tracker, 2: absorber)\n list: list of all hits as a numpy array containing (x, y, z, energy) as row \n \"\"\"\n \n print(\"{}: Load data from sim file\".format(time.time()))\n\n\n import ROOT as M\n\n # Load MEGAlib into ROOT\n M.gSystem.Load(\"$(MEGALIB)/lib/libMEGAlib.so\")\n\n # Initialize MEGAlib\n G = M.MGlobal()\n G.Initialize()\n \n # Fixed for the time being\n GeometryName = \"$(MEGALIB)/resource/examples/geomega/GRIPS/GRIPS.geo.setup\"\n\n # Load geometry:\n Geometry = M.MDGeometryQuest()\n if Geometry.ScanSetupFile(M.MString(GeometryName)) == True:\n print(\"Geometry \" + GeometryName + \" loaded!\")\n else:\n print(\"Unable to load geometry \" + GeometryName + \" - Aborting!\")\n quit()\n \n\n Reader = M.MFileEventsSim(Geometry)\n if Reader.Open(M.MString(self.FileName)) == False:\n print(\"Unable to open file \" + self.FileName + \". 
Aborting!\")\n quit()\n\n #Hist = M.TH2D(\"Energy\", \"Energy\", 100, 0, 600, 100, 0, 600)\n #Hist.SetXTitle(\"Input energy [keV]\")\n #Hist.SetYTitle(\"Measured energy [keV]\")\n\n\n EventTypes = []\n EventHits = []\n\n NEvents = 0\n while True: \n Event = Reader.GetNextEvent()\n if not Event:\n break\n \n Type = 0\n if Event.GetNIAs() > 0:\n if Event.GetIAAt(1).GetProcess() == M.MString(\"COMP\"):\n Type += 10 + Event.GetIAAt(1).GetDetectorType()\n elif Event.GetIAAt(1).GetProcess() == M.MString(\"PAIR\"):\n Type += 20 + Event.GetIAAt(1).GetDetectorType()\n else:\n break \n \n Hits = np.zeros((Event.GetNHTs(), 4))\n for i in range(0, Event.GetNHTs()):\n Hits[i, 0] = Event.GetHTAt(i).GetPosition().X()\n Hits[i, 1] = Event.GetHTAt(i).GetPosition().Y()\n Hits[i, 2] = Event.GetHTAt(i).GetPosition().Z()\n Hits[i, 3] = Event.GetHTAt(i).GetEnergy()\n \n NEvents += 1\n EventTypes.append(Type)\n EventHits.append(Hits)\n \n if NEvents >= self.MaxEvents:\n break\n\n #print(EventTypes)\n #print(EventHits)\n\n print(\"Occurances of different event types:\")\n print(collections.Counter(EventTypes))\n \n\n return EventTypes, EventHits\n\n\n###################################################################################################\n\n\n def trainTFMethods(self):\n \n # Load the data\n EventTypes, EventHits = self.loadData()\n \n # Add VoxNet here\n\n return\n\n\n###################################################################################################\n\n\n def test(self):\n \"\"\"\n Main test function\n\n Returns\n -------\n bool\n True is everything went well, False in case of an error\n\n \"\"\"\n\n return True\n\n\n\n\n# END\n###################################################################################################\n","sub_path":"eventtypeidentification/EventTypeIdentification.py","file_name":"EventTypeIdentification.py","file_ext":"py","file_size_in_byte":5589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"456471054","text":"from django.conf.urls import patterns, url\n\nfrom video import views\n\nurlpatterns = patterns('',\n\turl(r'^api/random$', views.random, name='random'),\n\n\turl(r'^(?P\\d+)$', views.detail, name='detail'),\n\n\turl(r'^ajax/(?P\\d+)/get_info$', views.ajax_get_info, name='ajax_get_info'),\n\turl(r'^ajax/(?P\\d+)/yoink$', views.ajax_yoink, name='ajax_yoink'),\n\n\turl(r'^ajax/(?P\\d+)/download$', views.ajax_download, name='ajax_download'),\n\n\turl(r'^ajax/(?P\\d+)/save_rating$', views.ajax_save_rating, name='ajax_save_rating'),\n\turl(r'^ajax/(?P\\d+)/refresh_ratings$', views.ajax_refresh_ratings, name='ajax_refresh_ratings'),\n\turl(r'^ajax/(?P\\d+)/delete_rating$', views.ajax_delete_rating, name='ajax_delete_rating'),\n\n\turl(r'^list$', views.index, name='index'),\n\turl(r'^list/(?P\\d+)$', views.index, name='index'),\n\turl(r'^list/(?P)/(?P\\d+)$', views.index, name='index'),\n\turl(r'^list/(?P\\D\\w+)/(?P\\d+)$', views.index, name='index'),\n\n\turl(r'^myvideos$', views.myvideos, name='myvideos'),\n\turl(r'^myvideos/(?P\\d+)$', views.myvideos, name='myvideos'),\n\n\turl(r'^cart$', views.cart, name='cart'),\n\turl(r'^ajax/(?P\\d+)/cart$', views.ajax_cart, name='ajax_cart'),\n\turl(r'^ajax/cart_page_remove$', views.ajax_cart_page_remove, name='ajax_cart_page_remove'),\n\turl(r'^ajax/refresh_cart$', views.ajax_refresh_cart, name='ajax_refresh_cart'),\n\turl(r'^ajax/buy_cart$', views.ajax_buy_cart, name='ajax_buy_cart'),\n\n\turl(r'^upload$', views.upload, 
name='upload'),\n\n\turl(r'^(?P\\d+)/edit$', views.edit, name=\"edit\"),\n\turl(r'^(?P\\d+)/ajax_delete$', views.ajax_delete, name=\"ajax_delete\"),\n\turl(r'^deleted$', views.deleted, name=\"deleted\"),\n\n\turl(r'^(?P\\d+)/process$', views.process, name=\"process\"),\n\turl(r'^ajax/(?P\\d+)/process$', views.ajax_process, name=\"ajax_process\"),\n)\n\n","sub_path":"site/video/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"16877064","text":"import json\nimport requests\n\n# @staticmethod\ndef test(**kwargs):\n print('inside test()')\n print(kwargs)\n # print(args)\n for key, value in kwargs.items(): \n print (\"%s == %s\" %(key, value)) \n return \"hello there test\"\n\ndef linkedauth(code):\n print('inside linkedauth()')\n print('and value of code is : ')\n print(code)\n headers = {\n 'Content-Type': 'application/x-www-form-urlencoded',\n }\n params = {\n 'grant_type': 'authorization_code',\n 'code': code,\n 'redirect_uri': 'http://localhost:1024/experiments', \n 'client_id': '78lloahtzyrsoh', \n 'client_secret': 'DuMq3nH3u8j651Os'\n }\n print('value of params')\n print(params)\n url = 'https://www.linkedin.com/oauth/v2/accessToken'\n response = requests.post(url, headers=headers, params=params)\n return response.text\n\ndef linkedauthcallback():\n print('inside linkedauthcallback()')\n return 'linkedauthcallback return'\n\ndef linkeduserprofile(req):\n print('inside linkeduserprofile()')\n print('value of req')\n print(req)\n token = req.get('payload').get('token')\n bearerToken = \"Bearer \" + token\n headers = {\"Authorization\": bearerToken}\n url = 'https://api.linkedin.com/v2/me'\n response = requests.get(url, headers=headers)\n print(response.text)\n return 'linkeduserprofile return'","sub_path":"backend/routes/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"536571395","text":"#-*- coding:utf-8 -*-\n# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Generic evaluation script that evaluates a model using a given dataset.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\nimport tensorflow as tf\nimport csv\nimport os\nimport decimal\n\nfrom datasets import dataset_factory\nfrom nets import nets_factory\nfrom preprocessing import preprocessing_factory\nfrom tensorflow.python.training import saver as tf_saver\n\nslim = tf.contrib.slim\n\ntf.app.flags.DEFINE_integer(\n 'batch_size', 100, 'The number of samples in each batch.')\n\ntf.app.flags.DEFINE_integer(\n 'max_num_batches', None,\n 'Max number of batches to evaluate by default use all.')\n\ntf.app.flags.DEFINE_string(\n 'master', '', 'The address of the TensorFlow master to use.')\n\ntf.app.flags.DEFINE_string(\n 'checkpoint_path', '/tmp/tfmodel/',\n 'The directory where the model was written to or an absolute path to a '\n 'checkpoint file.')\n\ntf.app.flags.DEFINE_string(\n 'eval_dir', '/tmp/tfmodel/', 'Directory where the results are saved to.')\n\ntf.app.flags.DEFINE_integer(\n 'num_preprocessing_threads', 1,\n 'The number of threads used to create the batches.')\n\ntf.app.flags.DEFINE_string(\n 'dataset_name', 'imagenet', 'The name of the dataset to load.')\n\ntf.app.flags.DEFINE_string(\n 'dataset_split_name', 'test', 'The name of the train/test split.')\n\ntf.app.flags.DEFINE_string(\n 'dataset_dir', None, 'The directory where the dataset files are stored.')\n\ntf.app.flags.DEFINE_integer(\n 'labels_offset', 0,\n 'An offset for the labels in the dataset. This flag is primarily used to '\n 'evaluate the VGG and ResNet architectures which do not use a background '\n 'class for the ImageNet dataset.')\n\ntf.app.flags.DEFINE_string(\n 'model_name', 'inception_v3', 'The name of the architecture to evaluate.')\n\ntf.app.flags.DEFINE_string(\n 'preprocessing_name', None, 'The name of the preprocessing to use. 
If left '\n 'as `None`, then the model_name flag is used.')\n\ntf.app.flags.DEFINE_float(\n 'moving_average_decay', None,\n 'The decay to use for the moving average.'\n 'If left as None, then moving averages are not used.')\n\ntf.app.flags.DEFINE_integer(\n 'eval_image_size', None, 'Eval image size')\n\ntf.app.flags.DEFINE_string(\n 'output_dir', './pig_result', 'result file')\n\ntf.app.flags.DEFINE_boolean(\n 'dropout_keep_prob', 1.0,\n 'Dropout keep probability.')\n\nFLAGS = tf.app.flags.FLAGS\n\nctx = decimal.Context()\nctx.prec = 20\n\ndef float_to_str(f):\n d1 = ctx.create_decimal(repr(f))\n return format(d1, 'f')\n\ndef main(_):\n if not FLAGS.dataset_dir:\n raise ValueError('You must supply the dataset directory with --dataset_dir')\n\n tf.logging.set_verbosity(tf.logging.INFO)\n with tf.Graph().as_default():\n tf_global_step = slim.get_or_create_global_step()\n\n ######################\n # Select the dataset #\n ######################\n dataset = dataset_factory.get_dataset(\n FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)\n\n ####################\n # Select the model #\n ####################\n #num_classes = dataset.num_classes-FLAGS.labels_offset\n num_classes = None\n network_fn = nets_factory.get_network_fn(\n FLAGS.model_name,\n num_classes=num_classes,\n is_training=False)\n\n ##############################################################\n # Create a dataset provider that loads data from the dataset #\n ##############################################################\n provider = slim.dataset_data_provider.DatasetDataProvider(\n dataset,\n shuffle=False,\n common_queue_capacity=2 * FLAGS.batch_size,\n common_queue_min=FLAGS.batch_size)\n [image, name] = provider.get(['image', 'name'])\n\n #####################################\n # Select the preprocessing function #\n #####################################\n preprocessing_name = FLAGS.preprocessing_name or FLAGS.model_name\n image_preprocessing_fn = preprocessing_factory.get_preprocessing(\n preprocessing_name,\n is_training=False)\n\n eval_image_size = FLAGS.eval_image_size or network_fn.default_image_size\n\n image = image_preprocessing_fn(image, eval_image_size, eval_image_size)\n\n images, names = tf.train.batch(\n [image, name],\n batch_size=FLAGS.batch_size,\n num_threads=FLAGS.num_preprocessing_threads,\n capacity=5 * FLAGS.batch_size)\n\n ####################\n # Define the model #\n ####################\n net, end_points = network_fn(images)\n num_classes=dataset.num_classes - FLAGS.labels_offset\n\n with tf.variable_scope('InceptionV4', [net], reuse=None) as scope:\n with slim.arg_scope([slim.batch_norm, slim.dropout],\n is_training=True):\n with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],\n stride=1, padding='SAME'):\n # Auxiliary Head logits\n if num_classes:\n with tf.variable_scope('AuxLogits'):\n # 17 x 17 x 1024\n aux_logits = tf.slice(end_points['Mixed_6h'], [0, 0, 0, 0], [FLAGS.batch_size, -1, -1, -1])\n aux_logits = slim.avg_pool2d(aux_logits, [5, 5], stride=3,\n padding='VALID',\n scope='AvgPool_1a_5x5')\n aux_logits = slim.conv2d(aux_logits, 128, [1, 1],\n scope='Conv2d_1b_1x1')\n aux_logits = slim.conv2d(aux_logits, 768,\n aux_logits.get_shape()[1:3],\n padding='VALID', scope='Conv2d_2a')\n aux_logits = slim.flatten(aux_logits)\n aux_logits = slim.fully_connected(aux_logits, num_classes,\n activation_fn=None,\n scope='Aux_logits')\n end_points['AuxLogits'] = aux_logits\n\n # Final pooling and prediction\n # TODO(sguada,arnoegw): Consider adding a parameter global_pool 
which\n # can be set to False to disable pooling here (as in resnet_*()).\n with tf.variable_scope('Logits'): \n # 1 x 1 x 1536\n net = slim.dropout(net, FLAGS.dropout_keep_prob, scope='Dropout_1b')\n net = slim.flatten(net, scope='PreLogitsFlatten')\n end_points['PreLogitsFlatten'] = net\n # 1536\n logits = slim.fully_connected(net, num_classes, activation_fn=None,\n scope='Logits')\n #####################\n # Add dropout layer #\n #####################\n if FLAGS.model_name[:6] == \"resnet\":\n with tf.variable_scope(FLAGS.model_name, 'my_logits', [logits], reuse=None) as scope:\n with slim.arg_scope([slim.dropout], is_training=True):\n logits = slim.dropout(logits, FLAGS.dropout_keep_prob, scope='my_dropout')\n #logits = tf.reshape(logits, [-1, FLAGS.feature_size*FLAGS.feature_size*FLAGS.channel_num])\n logits = slim.flatten(logits, scope='flatten')\n logits = slim.fully_connected(logits, dataset.num_classes-FLAGS.labels_offset, \n activation_fn=None, scope='my_logits_layers')\n end_points['My_Logits'] = logits\n logits = tf.nn.softmax(logits, 1)\n\n if FLAGS.moving_average_decay:\n variable_averages = tf.train.ExponentialMovingAverage(\n FLAGS.moving_average_decay, tf_global_step)\n variables_to_restore = variable_averages.variables_to_restore(\n slim.get_model_variables())\n variables_to_restore[tf_global_step.op.name] = tf_global_step\n else:\n variables_to_restore = slim.get_variables_to_restore()\n \n '''\n for var in variables_to_restore:\n tf.logging.info('%s' % var.op.name)\n '''\n\n # TODO(sguada) use num_epochs=1\n if FLAGS.max_num_batches:\n num_batches = FLAGS.max_num_batches\n else:\n # This ensures that we make a single pass over all of the data.\n num_batches = math.ceil(dataset.num_samples / float(FLAGS.batch_size))\n \n if tf.gfile.IsDirectory(FLAGS.checkpoint_path):\n checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)\n else:\n checkpoint_path = FLAGS.checkpoint_path\n\n tf.logging.info('Testing %s' % checkpoint_path)\n \n sess_conf = tf.ConfigProto(allow_soft_placement=True,log_device_placement=False)\n sess = tf.Session(config=sess_conf)\n with sess.as_default():\n tf.logging.info(\"Loading model from checkpoint: %s\", checkpoint_path)\n if variables_to_restore is not None:\n saver = tf_saver.Saver(variables_to_restore)\n saver.restore(sess, checkpoint_path)\n else:\n tf.logging.error(\"Fail to load checkpoint: %s\", os.path.basename(checkpoint_path))\n tf.logging.info(\"Successfully loaded checkpoint: %s\", os.path.basename(checkpoint_path))\n \n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(coord=coord)\n \n results = {}\n keys = set([])\n print(num_batches)\n for i in xrange(int(num_batches)):\n batch_name, batch_logit = sess.run([names, logits])\n for i, n in enumerate(batch_name):\n if n in keys:\n print(n)\n elif n[:-4] == \"1363\":\n print(\"dd\")\n print(n)\n else:\n keys.add(n)\n for j, l in enumerate(batch_logit[i]):\n #results.append([n[:-4], dataset.labels_to_names[j], str(round(l, 6))])\n if int(n[:-4]) not in results:\n results[int(n[:-4])] = {}\n results[int(n[:-4])][int(dataset.labels_to_names[j])] = l\n \n output_file = os.path.join(FLAGS.output_dir, FLAGS.model_name+\".csv\")\n tf.logging.info('Saving test result to %s.', output_file)\n n_k = results.keys()\n n_k.sort()\n with open(output_file, 'wb') as write_f:\n w = csv.writer(write_f)\n for n in n_k:\n cls_k = results[n].keys()\n cls_k.sort()\n for c in cls_k:\n w.writerow([n, c, float_to_str(results[n][c])])\n\n coord.request_stop()\n 
coord.join(threads)\n\nif __name__ == '__main__':\n tf.app.run()\n","sub_path":"test_image_classifier.py","file_name":"test_image_classifier.py","file_ext":"py","file_size_in_byte":11148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"192105599","text":"# -*- coding: utf-8 -*-\nimport matplotlib\nmatplotlib.use('Agg')\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.basemap import Basemap\nimport matplotlib.patches as patches\nimport logging\nimport numpy as np\nimport os\nimport pyproj\n\nlog = logging.getLogger(__name__)\n\n\ndef img2map(r, s, pixelsize, upperleft_x, upperleft_y):\n \"\"\"Convert from image coordinates to map coordinates\"\"\"\n xmap = upperleft_x + (s * pixelsize)\n ymap = upperleft_y - (r * pixelsize)\n return xmap, ymap\n\n\ndef map2img(xmap, ymap, pixelsize, upperleft_x, upperleft_y):\n \"\"\"Convert from map coordinates to image coordinates\"\"\"\n s = (xmap - upperleft_x) / pixelsize\n r = (upperleft_y - ymap) / pixelsize\n return r, s\n\n\ndef map2geo(xmap, ymap, proj):\n \"\"\"Convert from map coordinates to geographical coordinates\"\"\"\n return proj(xmap, ymap, inverse=True)\n\n\ndef geo2map(lon, lat, proj):\n \"\"\"Convert from geographical coordinates to map coordinates\"\"\"\n return proj(lon, lat, inverse=False)\n\n\ndef great_circle_distance(lon0, lat0, lon1, lat1, ellps='WGS84',\n option='width'):\n # g = pyproj.Geod(ellps='clrk66')\n g = pyproj.Geod(ellps=ellps)\n az01, az10, dist = g.inv(lon0, lat0, lon1, lat1)\n return dist\n\n\ndef is_number(s):\n \"\"\"Is s a floating point number\"\"\"\n try:\n float(s)\n return True\n except ValueError:\n return False\n\n\ndef proj2matplotlib_basemap_proj(remotesensingdata):\n # Parse projstring to basemap keyword arguments\n projstring = remotesensingdata.metadata['projstring']\n basemap_allowed_keys = ['llcrnrlon', 'llcrnrlat', 'urcrnrlon', \\\n 'urcrnrlat', 'llcrnrx', 'llcrnry', 'urcrnrx', \\\n 'urcrnry', 'projection', 'resolution', 'rsphere', \\\n 'ellps', 'lat_ts', 'lat_0', 'lon_0']\n\n kwargs = {}\n for projsubstring in projstring.split():\n if len(projsubstring.split('=')) == 2:\n key = projsubstring.split('=')[0][1:]\n value = projsubstring.split('=')[1]\n if key == 'proj': # Basemap uses proj instead of projection\n key = 'projection'\n if key == 'ellps': # Basemap uses ellips instead of ellps\n key = 'ellips'\n elif key == 'k':\n key = 'k_0'\n elif key == 'a':\n key = 'rsphere'\n elif key == 'b':\n key = 'rsphere'\n if key in basemap_allowed_keys:\n if is_number(value):\n kwargs[key] = float(value)\n else:\n kwargs[key] = value\n\n kwargs['llcrnrlon'] = remotesensingdata.metadata['lowerleft_lon']\n kwargs['llcrnrlat'] = remotesensingdata.metadata['lowerleft_lat']\n kwargs['urcrnrlon'] = remotesensingdata.metadata['upperright_lon']\n kwargs['urcrnrlat'] = remotesensingdata.metadata['upperright_lat']\n\n # Best choice of resolution based on extent of map (c, l, i, h, f)\n map_extent_m = great_circle_distance(kwargs['llcrnrlon'],\n kwargs['llcrnrlat'], kwargs['urcrnrlon'],\n kwargs['urcrnrlat'])\n log.debug('map_extent_m=%s' % map_extent_m)\n if map_extent_m < 200e2:\n kwargs['resolution'] = 'f'\n elif map_extent_m < 200e3:\n kwargs['resolution'] = 'h'\n elif map_extent_m < 200e4:\n kwargs['resolution'] = 'i'\n kwargs['area_thresh'] = 20 # Default is 100\n elif map_extent_m < 200e5:\n kwargs['resolution'] = 'l'\n else:\n kwargs['resolution'] = 'c'\n\n log.debug('Basemap resolution=%s' % kwargs['resolution'])\n\n return kwargs\n\n\ndef 
make_map(remotesensingdata, outfilename=None, dpi=1280):\n\n # Translate projection of radardataset to matplotlib/basemap projection\n kwargs = proj2matplotlib_basemap_proj(remotesensingdata)\n log.debug('Proj parameters using Basemap keywords: %s' % kwargs)\n\n # Map\n m = Basemap(**kwargs)\n\n # Colormap\n colormapname = 'radarrainbow'\n colormapname = 'monochrome_gray'\n if colormapname == 'raderrainbow':\n rgb_reflectivity_colors = {\n -32.0: [189,215,231],\n 5.0: [107,174,214],\n 10.0: [33,113,181],\n 15.0: [65,171,93],\n 20.0: [35,139,69],\n 25.0: [0,109,44],\n 30.0: [0, 60, 10],\n 35.0: [253,141,60],\n 40.0: [227,26,28],\n 45.0: [203,24,29],\n 50.0: [165,15,21],\n 55.0: [174,1,126],\n 60.0: [122,1,119],\n 65.0: [73,0,106],\n 70.0: [30,0,60],\n 75.0: [0,0,0]\n }\n elif colormapname == 'monochrome_blue':\n rgb_reflectivity_colors = {\n -32.0: [208,209,230],\n 5.0: [166,189,219],\n 20.0: [116,169,207],\n 35.0: [54,144,192],\n 50.0: [5,112,176],\n 75.0: [3,78,123]\n }\n elif colormapname == 'monochrome_gray':\n rgb_reflectivity_colors = {\n -32.0: [208]*3,\n 5.0: [166]*3,\n 20.0: [116]*3,\n 35.0: [54]*3,\n 50.0: [25]*3,\n 75.0: [0]*3\n }\n\n # Custom colors\n rgb_reflectivity_colors[remotesensingdata.get_physical_values(241)] = [225,25,25] # CB\n rgb_reflectivity_colors[remotesensingdata.get_physical_values(242)] = [25,225,25] # TCU\n rgb_reflectivity_colors[remotesensingdata.get_physical_values(243)] = [255, 255, 255] #\n rgb_reflectivity_colors[remotesensingdata.get_physical_values(244)] = [127, 127, 127] # Radar centre\n rgb_reflectivity_colors[remotesensingdata.get_physical_values(245)] = [255, 255, 255] # (Equal distances)\n rgb_reflectivity_colors[remotesensingdata.get_physical_values(246)] = [127, 127, 127] # Airports centre\n rgb_reflectivity_colors[remotesensingdata.get_physical_values(247)] = [248,248,30] # Lyn\n rgb_reflectivity_colors[remotesensingdata.get_physical_values(248)] = [200, 200, 200] # radMin\n rgb_reflectivity_colors[remotesensingdata.get_physical_values(249)] = [200, 200, 200] # radMid\n rgb_reflectivity_colors[remotesensingdata.get_physical_values(250)] = [200, 200, 200] # radMax\n rgb_reflectivity_colors[remotesensingdata.get_physical_values(251)] = [0,0,0] # CB with TS\n rgb_reflectivity_colors[remotesensingdata.get_physical_values(252)] = [77,175,74] # CB with VCTS\n rgb_reflectivity_colors[remotesensingdata.get_physical_values(253)] = [152,78,163] # CB only\n rgb_reflectivity_colors[remotesensingdata.get_physical_values(254)] = [55,126,184] # TCU only\n\n rgb_colors = rgb_reflectivity_colors\n rgb_nodata = np.array([200,200,200])/255.\n rgb_undetect = [1, 1, 1]\n\n colors = []\n for key in sorted(rgb_colors.keys()):\n rgb01 = []\n for rgb in rgb_colors[key]:\n rgb01.append(rgb/255.)\n\n colors.append(rgb01)\n\n bounds = sorted(rgb_colors.keys())\n extend = 'max'\n extend = 'neither'\n if extend == 'both':\n boundaries = [bounds[0]-1] + bounds + [bounds[-1]+1]\n elif extend == 'max':\n boundaries = bounds + [bounds[-1]+1]\n elif extend == 'min':\n boundaries = [bounds[0]-1] + bounds\n else:\n boundaries = bounds\n\n # Image\n data = remotesensingdata.data\n nodatamask = np.where(data == remotesensingdata.metadata['nodata'])\n undetectmask = np.where(data == remotesensingdata.metadata['undetect'])\n\n data = (data*remotesensingdata.metadata['gain']) \\\n + remotesensingdata.metadata['offset']\n data[np.where(databounds[-1])] = bounds[-1]\n\n data[nodatamask] = np.nan\n data[undetectmask] = bounds[0]-1\n\n # Figure\n fig = plt.figure(1, frameon=True, 
figsize=(10, 9))\n ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])\n ax.set_axis_bgcolor(rgb_nodata)\n\n # Colormap\n cmap = matplotlib.colors.ListedColormap(colors)\n cmap.set_under(rgb_undetect)\n cmap.set_over(colors[-1])\n\n norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N)\n\n # ny = radardataset.numrows\n # nx = radardataset.numcols\n # lons, lats = m.makegrid(nx, ny)\n # x, y = m(lons, lats)\n\n cs = m.imshow(data, origin='lower', interpolation='nearest', extent=bounds, cmap=cmap, norm=norm)\n\n # HACK\n boundaries = bounds[0:7]\n bounds = bounds[0:7]\n\n cbar = m.colorbar(cs, location='right', cmap=cmap, norm=norm,\n boundaries=boundaries, ticks=bounds, spacing='uniform', format='%0i', extend=extend)\n cbar.set_ticks([-32, 5, 20, 35, 50, 75, remotesensingdata.get_physical_values(240)])\n cbar.set_ticklabels(['-32', '5', '20', '35', '50', '75', str(int(remotesensingdata.get_physical_values(241)))])\n cbar.set_label('dBZ')\n\n #cbar.set_label(radardataset.product)\n def rgb2tuple(rgb):\n return tuple(map(lambda x: (float(x)/255.), rgb))\n\n fig.text(0.605, 0.61, ' \\n'*13, fontsize=9, bbox=dict(facecolor='white', alpha=1.0))\n fig.text(0.609, 0.81, 'Radarbased METAR')\n fig.text(0.609, 0.79, ' CB radar echoes')\n fig.text(0.609, 0.77, ' TCU radar echoes')\n fig.text(0.609, 0.75, ' Lightning strikes')\n fig.text(0.609, 0.73, ' CB only')\n fig.text(0.609, 0.71, ' TCU only')\n fig.text(0.609, 0.69, ' CB with VCTS')\n fig.text(0.609, 0.67, ' CB with TS')\n circ=plt.Circle((0.5, 0.5), radius=100, color='green')\n ax.add_patch(circ)\n # cs = m.imshow(data, origin='lower', interpolation='nearest')\n\n\n # Coastlines\n m.drawcoastlines(color='black', linewidth=0.2)\n\n if outfilename is not None:\n plt.savefig(outfilename, pad_inches=0.01, bbox_inches='tight',\n dpi=dpi, transparent=False)\n log.info('Wrote file: %s' % outfilename)\n else: # If no output file given then just show the image\n plt.show()\n\n\n plt.close()\n","sub_path":"utils_map.py","file_name":"utils_map.py","file_ext":"py","file_size_in_byte":9894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"414781612","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Feb 23 17:16:43 2017\r\n\r\n@author: prade_000\r\n\"\"\"\r\n\r\nrows = 'ABCDEFGHI'\r\ncols = '123456789'\r\n\r\ndef cross(a, b):\r\n return [s+t for s in a for t in b]\r\n\r\nboxes = cross(rows, cols)\r\n\r\nrow_units = [cross(r, cols) for r in rows]\r\ncolumn_units = [cross(rows, c) for c in cols]\r\nsquare_units = [cross(rs, cs) for rs in ('ABC','DEF','GHI') for cs in ('123','456','789')]\r\n# create diagonal units. 
adding these to the units automatically applies all constraints on them\r\ndiagonal_units = []\r\ndiagonal_units1 = []\r\ndiagonal_units2 = []\r\nfor i in range(9):\r\n    diagonal_units1.append(rows[i]+cols[i])\r\n    diagonal_units2.append(rows[i]+str(9-i))\r\ndiagonal_units.append(diagonal_units1)\r\ndiagonal_units.append(diagonal_units2)\r\nunitlist = row_units + column_units + square_units + diagonal_units\r\nunits = dict((s, [u for u in unitlist if s in u]) for s in boxes)\r\npeers = dict((s, set(sum(units[s],[]))-set([s])) for s in boxes)\r\n\r\ndef display(values):\r\n    \"\"\"\r\n    Display the values as a 2-D grid.\r\n    Input: The sudoku in dictionary form\r\n    Output: None\r\n    \"\"\"\r\n    width = 1+max(len(values[s]) for s in boxes)\r\n    line = '+'.join(['-'*(width*3)]*3)\r\n    for r in rows:\r\n        print(''.join(values[r+c].center(width)+('|' if c in '36' else '')\r\n                      for c in cols))\r\n        if r in 'CF': print(line)\r\n    return","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"423935394","text":"from urllib.parse import urlencode\n\nfrom django import template\n\n\nregister = template.Library()\n\n\n@register.simple_tag\ndef urlparams(param, *args, **kwargs):\n    param = param.copy()\n\n    dicts = list(args)\n    dicts.append(kwargs)\n    for d in dicts:  # renamed from 'dict' to avoid shadowing the built-in\n        for key, value in d.items():\n            if value is None:\n                if key in param:\n                    del param[key]\n            else:\n                param[key] = value\n\n    return '?' + urlencode(param)\n","sub_path":"home/templatetags/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"490821003","text":"\"\"\"Model unit tests.\"\"\"\nimport pytest\nfrom run4it.api.profile.model import Profile\nfrom run4it.api.user.model import User\n\n@pytest.mark.usefixtures('db')\nclass TestProfileModel:\n\n    def test_get_by_id(self):\n        # note: 1-to-1 relationship User<=>Profile. 
Thus we need a User.\n        user = User('user', 'user@mail.com')\n        user.save()\n        new_profile = Profile(user)\n        new_profile.save()\n        retrieved_profile = Profile.get_by_id(new_profile.id)\n        assert(retrieved_profile == new_profile)\n\n    def test_profile_unique(self, db):\n        user = User('user', 'user@mail.com')\n        user.save()\n        profile1 = Profile(user)\n        profile1.save()\n\n        try:\n            profile2 = Profile(user)\n            profile2.save()\n\n        except Exception:\n            db.session.rollback()\n\n        num_profiles = db.session.query(Profile).count()\n        assert(num_profiles == 1)\n    \n    def test_profile_username(self):\n        user = User('profileUsername', 'user@mail.com')\n        user.save()\n        profile = Profile(user)\n        profile.save()\n        assert(profile.username == 'profileUsername')\n\n    def test_profile_data_defaults_to_none(self):\n        user = User('user', 'user@mail.com')\n        user.save()\n        new_profile = Profile(user)\n        new_profile.save()\n        assert(new_profile.height is None)\n        assert(new_profile.weight is None) \n        assert(new_profile.birth_date is None) \n        assert(new_profile.gender_code is None) \n","sub_path":"tests/test_model_profile.py","file_name":"test_model_profile.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"598417550","text":"from django.conf import settings\nfrom django.db import models\nfrom django.apps import apps\n\nUSER_MODEL = getattr(settings, 'AUTH_USER_MODEL', None) or 'auth.User'\napp_label, model_name = USER_MODEL.split('.')\n\nUser = apps.get_registered_model(app_label, model_name)\n\n\nclass Post(models.Model):\n    author = models.ForeignKey(User, related_name='posts')\n    title = models.CharField(max_length=255)\n    body = models.TextField(blank=True, null=True)\n\n\nclass Photo(models.Model):\n    post = models.ForeignKey(Post, related_name='photos')\n    image = models.ImageField(upload_to=\"%Y/%m/%d\")\n","sub_path":"forum/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"474712911","text":"\"\"\"\r\ntarget_dir\r\n |-imageA\r\n |-imageB\r\n ...\r\n\r\nRun recognition with a trained model on every image in a directory that has the structure above.\r\nWork in progress.\r\n\"\"\"\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torchvision import transforms, datasets\r\nfrom torch.autograd import Variable\r\n\r\nimport os\r\nimport numpy as np\r\nimport cv2\r\n\r\n# Device configuration\r\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\r\nprint(\"device:\", device)\r\n\r\n# Hyper parameters\r\nNUM_CLASSES = 6\r\nIMAGE_SIZE = 32\r\nFACE_SIZE = 32\r\nBATCH_SIZE = 10\r\n\r\nENTER_KEY = 13 # Enter key\r\nESC_KEY = 27 # Esc key\r\nCASCADE_PATH = \"C:\\\\workspace_py\\\\Anaconda3\\\\envs\\\\py35\\\\Lib\\\\site-packages\\\\cv2\\\\data\\\\haarcascade_frontalface_default.xml\"\r\n\r\n# Specify the classifier\r\ncascade = cv2.CascadeClassifier(CASCADE_PATH)\r\n\r\n\r\ndef detect_maxsize_faces(target_image):\r\n    target_gray = cv2.cvtColor(target_image, cv2.COLOR_BGR2GRAY)\r\n    face_list = cascade.detectMultiScale(target_gray, minSize=(200, 200)) \r\n\r\n    # Mark the detected faces\r\n    f_x = 0\r\n    f_y = 0\r\n    f_size = 0\r\n    for (x, y, w, h) in face_list:\r\n        color = (0, 0, 225)\r\n        pen_w = 1\r\n\r\n        # Treat the largest detection as the face\r\n        if w > f_size:\r\n            f_x = x\r\n            f_y = y\r\n            f_size = w\r\n\r\n    return len(face_list), f_x, f_y, f_size\r\n\r\n\r\ndef make_thumnail(nail_path):\r\n    nail_list = os.listdir(nail_path)\r\n    blank = np.zeros((60, 128, 3), np.uint8)\r\n\r\n    for i, nail in enumerate(nail_list):\r\n        data = 
os.listdir(os.path.join(nail_path, nail))\r\n        nail_name = os.path.join(nail_path, nail, data[0])\r\n        nail_image = cv2.imread(nail_name)\r\n        nail_image = cv2.resize(nail_image, (128, 128))\r\n        # thumnail = cv2.hconcat([thumnail, nail_image])\r\n        if i == 0:\r\n            thumnail = np.concatenate((nail_image, blank), axis=0)\r\n        else:\r\n            thumnail_temp = np.concatenate((nail_image, blank), axis=0)\r\n            thumnail = np.concatenate((thumnail, thumnail_temp), axis=1)\r\n\r\n    return thumnail\r\n\r\n\r\n\"\"\"\r\nresults is expected to be the .data attribute of the values\r\noutput by the model.\r\nIts shape is (1, num_classes) and its type is tensor.\r\n\"\"\"\r\ndef show_result(thumnail, results, labels):\r\n    x = 0\r\n    y = 128\r\n\r\n    # Initialize the drawing region\r\n    # Without this, the text keeps piling up on top of itself\r\n    # Fill from (start x, start y) to (end x, end y)\r\n    cv2.rectangle(thumnail, (0, 128), (128*6, 128+70), (0,0,0), -1)\r\n\r\n    # Draw the result for each class\r\n    for i in range(len(results[0])):\r\n        result_text = str(results[0][i]) \r\n        result_text = result_text[6:] # strip the leading 'tensor' text, which otherwise remains\r\n\r\n        cv2.putText(thumnail, labels[i], (x,y+20), fontFace=cv2.FONT_HERSHEY_PLAIN,\\\r\n            fontScale=1, color=(0,255,255), thickness=2)\r\n        cv2.putText(thumnail, result_text, (x,y+40), fontFace=cv2.FONT_HERSHEY_PLAIN,\\\r\n            fontScale=1, color=(255,255,255), thickness=1)\r\n\r\n        x += 128\r\n\r\n    cv2.imshow(\"thumnail\", thumnail)\r\n\r\n\r\n# CNN\r\n# https://qiita.com/kazetof/items/6a72926b9f8cd44c218e\r\n# Conv2d arguments, from left: number of input channels, number of output channels, kernel size\r\n\"\"\"\r\ninput(3, 32, 32)\r\nconv1(3, 6, 5) => (6, 28, 28)\r\npool1(2, 2) => (6, 14, 14)\r\nconv2(6, 16, 5) => (16, 10, 10)\r\npool2(2, 2) => (16, 5, 5)\r\n\r\n16 * 5 * 5 = 400 => 120\r\n120 => 84\r\n84 => 4\r\n\"\"\"\r\nclass CNN_32(nn.Module):\r\n    def __init__(self):\r\n        super(CNN_32, self).__init__()\r\n        self.conv1 = nn.Conv2d(3, 6, 5)\r\n        self.pool = nn.MaxPool2d(2, 2)\r\n        self.conv2 = nn.Conv2d(6, 16, 5)\r\n        self.fc1 = nn.Linear(16 * 5 * 5, 120)\r\n        self.fc2 = nn.Linear(120, 84)\r\n        self.fc3 = nn.Linear(84, NUM_CLASSES)\r\n        self.relu = nn.ReLU()\r\n        # self.softmax = nn.LogSoftmax()\r\n        self.softmax = nn.Softmax()\r\n\r\n    def forward(self, x):\r\n        x = self.pool(self.relu(self.conv1(x)))\r\n        x = self.pool(self.relu(self.conv2(x)))\r\n        x = x.view(-1, 16 * 5 * 5)\r\n        x = self.relu(self.fc1(x))\r\n        x = self.relu(self.fc2(x))\r\n        x = self.fc3(x)\r\n        x = self.softmax(x)\r\n        return x\r\n\r\n\r\ndef main():\r\n    # Build the labels\r\n    label = os.listdir(\"..\\\\actress\\\\train\")\r\n\r\n    # Build the thumbnail strip\r\n    thumnail = make_thumnail(\"..\\\\actress\\\\train\")\r\n\r\n    # Restore the model\r\n    model = CNN_32().to(device)\r\n    # model = CNN_64().to(device)\r\n    param = torch.load('model.ckpt') # load the parameters\r\n    model.load_state_dict(param)\r\n\r\n    # Capture the camera\r\n    print(\"capture_camera\")\r\n    cap = cv2.VideoCapture(0) # 0 is the camera device number\r\n\r\n    count = -1\r\n\r\n    # Switch the network to inference mode\r\n    model.eval()\r\n    with torch.no_grad(): # stop storing gradients during inference (to avoid a memory leak)\r\n        while True:\r\n            # ret is the frame-capture success flag\r\n            ret, frame = cap.read()\r\n\r\n            # Get the number of faces, plus the position and size of the largest one, from the frame\r\n            f_num, f_x, f_y, f_size = detect_maxsize_faces(frame)\r\n\r\n            # Start recognition if a face was found\r\n            if f_num > 0:\r\n                # Extract the face from the frame\r\n                face_image = frame[f_y:f_y+f_size, f_x:f_x+f_size]\r\n\r\n                # Resize the face image\r\n                if FACE_SIZE is not None:\r\n                    size = (FACE_SIZE, FACE_SIZE)\r\n                    face_image = cv2.resize(face_image, size)\r\n\r\n                # Feed the extracted face image to the trained model\r\n                x = np.transpose(face_image, (2, 0, 1)) # (H, W, ch) to (ch, H, W)\r\n                x = np.array([x])\r\n                x = torch.Tensor(x)\r\n                x = Variable(x).to(device)\r\n\r\n                # Recognition result\r\n                outputs = model(x)\r\n                _, predicted = torch.max(outputs.data, 1)\r\n\r\n                # 
Throttle how often the result text is redrawn\r\n                if count > 50 or count == -1:\r\n                    # Show the recognition result together with the thumbnails\r\n                    show_result(thumnail, outputs.data, label)\r\n                    count = 0\r\n\r\n                # Draw the recognition result on the frame\r\n                answer = predicted[0]\r\n                cv2.rectangle(frame, (f_x, f_y), (f_x+f_size, f_y+f_size), (0,255,0), thickness=2)\r\n                cv2.putText(frame, label[answer], (f_x,f_y), fontFace=cv2.FONT_HERSHEY_SIMPLEX,\\\r\n                    fontScale=2, color=(255,255,255), thickness=3)\r\n\r\n            # Show the frame on screen\r\n            cv2.imshow('recognition', frame)\r\n\r\n            count += 1\r\n\r\n            # Handle key input\r\n            k = cv2.waitKey(1)\r\n            if k == ESC_KEY: # exit when ESC is pressed\r\n                print(\"Exit...\")\r\n                break\r\n            elif k == ENTER_KEY: # save the face when ENTER is pressed\r\n                print(\"Save now frame...\")\r\n                cv2.imwrite(\"face_image.jpg\", frame)\r\n\r\n    # Release the capture\r\n    cap.release()\r\n    cv2.destroyAllWindows()\r\n\r\nif __name__ == '__main__':\r\n    main()","sub_path":"realtime_recongition.py","file_name":"realtime_recongition.py","file_ext":"py","file_size_in_byte":7429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"419598546","text":"import numpy as np\n\ndef tabela(valorImc):\n    if valorImc < 16:\n        return str(valorImc) + ': Magreza severa'\n    elif valorImc < 18:\n        return str(valorImc) + ': Magreza moderada'\n    elif valorImc < 25:\n        return str(valorImc) + ': Saudavel'\n    elif valorImc < 35:\n        return str(valorImc) + ': Sobrepeso'\n    elif valorImc < 45:\n        return str(valorImc) + ': Obesidade'\n\naltura, peso, forca = np.loadtxt(r'C:\\Users\\matza\\OneDrive\\Documentos\\TESTE\\Scripts-Python\\peso.csv.txt',\n                          delimiter=';',\n                          unpack=True,\n                          dtype='float')\n\nimc = peso / altura ** 2\n\n# what is the smallest value in this population?\nprint('Min: '+ tabela(np.amin(imc)))\n# what is the largest value in this population?\n#print(\"Max: \"+ tabela(np.amax(imc))) # has an error: tabela returns None for IMC >= 45","sub_path":"Scripts/DataScience-02-1.py","file_name":"DataScience-02-1.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"231943337","text":"class Chat:\n    def __init__(self, obj: dict):\n        self.id = obj.get('id')\n        self.type = obj.get('type')  # private / group / supergroup / channel\n\n        # optional\n\n        self.title = obj.get('title')\n        self.username = obj.get('username')\n        self.first_name = obj.get('first_name')\n        self.last_name = obj.get('last_name')\n        self.all_members_are_administrators = obj.get('all_members_are_administrators')\n\n    def __str__(self):\n        return \"{} : {}\".format(self.id, self.type)\n\n    def __repr__(self):\n        return (\"id : {}\\n\"\n                \"type : {}\\n\"\n                \"title : {}\\n\"\n                \"username : {}\\n\"\n                \"first_name : {}\\n\"\n                \"last_name : {}\").format(self.id, self.type, self.title,\n                                         self.username, self.first_name,\n                                         self.last_name)\n","sub_path":"bot_types/chat_type.py","file_name":"chat_type.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"56827362","text":"#Imports\nimport tkinter as tk\nfrom tkinter import ttk\nimport time, sys, math\nimport numpy as np\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nfrom matplotlib.figure import Figure\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom scipy import signal\n\nfont_ = ('Arial', 14)\nbackground_ = '#f5f5f5'\nforeground_ = '#2c2f33'\n\nroot = tk.Tk()\nroot.configure(background = background_)\nroot.title('Модуляция')\n#root.geometry('1920x1080')\nroot.resizable(1280, 720)\n\n#style\nstyle = ttk.Style()\nstyle.theme_create('mod_theme', parent 
= 'alt', settings = {\n    'TCombobox': {'configure':{\n        'selectbackground': 'white',\n        'fieldbackground': 'white',\n        'selectforeground': foreground_\n    }},\n    'TLabel': {'configure':{\n        'background': background_,\n        'foreground': foreground_\n    }}\n})\nstyle.theme_use(\"mod_theme\")\n\ndef func(event=None):\n    pass\n\n#signal\ndef signal_freq_change(event=None):\n    global s_frq\n    s_frq = int(signal_freq.get())\n\ndef signal_amp_change(event=None):\n    global s_amp, c_amp, carry_amp\n    s_amp = int(signal_amp.get())\n    carry_amp.set(s_amp*2)\n    c_amp = s_amp*2\n\n#carry\ndef carry_freq_change(event=None):\n    global c_frq\n    c_frq = int(carry_freq.get())\n\ndef carry_amp_change(event=None):\n    global c_amp\n    c_amp = int(carry_amp.get())\n\ndef scale_up(event=None):\n    prev = signal_lvl.get()\n    signal_lvl.set(prev+1)\n\ndef scale_down(event=None):\n    prev = signal_lvl.get()\n    signal_lvl.set(prev-1)\n\nplot_container = tk.LabelFrame(root, text = '', height = 960, width = 1280, font = font_, bg = 'white')\nplot_container.grid(row = 0, column = 0, rowspan = 9, padx = 5, pady = 5)\nplot_container.configure(background = background_, foreground = foreground_)\n\n#main label\ntype_text = tk.StringVar()\ntype_text.set('Амплитудная модуляция')\ntype_label = ttk.Label(plot_container, textvariable = type_text, font = font_, background = background_)\ntype_label.grid(row = 0, column = 0, sticky = ('N'), padx = 2, pady = 5)\n\n#vert scale\nsignal_lvl = tk.IntVar()\nsignal_scale = tk.Scale(root, from_ = 100, to = -100, bg = 'white', length = 250, variable = signal_lvl)\nsignal_scale.place(x = 25, y = 140)\nsignal_lvl.set(25)\n\nup_button = tk.Button(root, text = '+', command = scale_up, font = font_, bg = 'white', width = 2)\nup_button.place(x = 25, y = 100)\n\ndown_button = tk.Button(root, text = '-', command = scale_down, font = font_, bg = 'white', width = 2)\ndown_button.place(x = 25, y = 395)\n\n#Signal frequency\nsignal_freq_lbl = ttk.Label(root, text = 'Частота (fc)', font = font_)\nsignal_freq_lbl.grid(row = 0, column = 1, sticky = ('E', 'S'), padx = (40, 5), pady = 5)\nsignal_freq = tk.IntVar()\nsignal_freq.set(2)\nsignal_freq_box = tk.Spinbox(root, from_ = 1, to = 100, textvariable = signal_freq, font = font_, foreground = foreground_, command = signal_freq_change)\nsignal_freq_box.grid(row = 0, column = 2, sticky = ('E', 'W', 'S'), padx = 2, pady = 5)\nsignal_freq_box.bind('<Return>', func)\n\n#Signal amplitude\nsignal_amp_lbl = ttk.Label(root, text = 'Амплитуда', font = font_)\nsignal_amp_lbl.grid(row = 1, column = 1, sticky = 'E', padx = (40, 5))\nsignal_amp = tk.IntVar()\nsignal_amp.set(25)\nsignal_amp_box = tk.Spinbox(root, from_ = 5, to = 95, textvariable = signal_amp, font = font_, foreground = foreground_, command = signal_amp_change, increment = 5.0)\nsignal_amp_box.grid(row = 1, column = 2, sticky = ('E', 'W'), padx = 2, pady = 5)\nsignal_amp_box.bind('<Return>', func)\n\n\n#Carrying frequency\ncarry_freq_lbl = ttk.Label(root, text = 'Частота (fн)', font = font_)\ncarry_freq_lbl.grid(row = 3, column = 1, sticky = ('E', 'S'), padx = (40, 5), pady = 5)\ncarry_freq = tk.IntVar()\ncarry_freq.set(40)\ncarry_freq_box = tk.Spinbox(root, from_ = 10, to = 100, textvariable = carry_freq, font = font_, foreground = foreground_, command = carry_freq_change, increment = 5.0)\ncarry_freq_box.grid(row = 3, column = 2, sticky = ('E', 'W', 'S'), padx = 2, pady = 5)\ncarry_freq_box.bind('<Return>', carry_freq_change)\n\n#Carrying amplitude\ncarry_amp_lbl = ttk.Label(root, text = 'Амплитуда', font = font_)\ncarry_amp_lbl.grid(row = 4, column = 1, 
sticky = ('E'), padx = (40, 5), pady = 5)\ncarry_amp = tk.IntVar()\ncarry_amp.set(50)\ncarry_amp_box = tk.Spinbox(root, from_ = 5, to = 100, textvariable = carry_amp, font = font_, foreground = foreground_, command = carry_amp_change, increment = 5.0, state = 'disable')\ncarry_amp_box.grid(row = 4, column = 2, sticky = ('E', 'W'), padx = 2, pady = 5)\ncarry_amp_box.bind('<Return>', func)\n\n#variables\ns_frq = int(signal_freq.get())\ns_amp = int(signal_amp.get())\n\nc_frq = int(carry_freq.get())\nc_amp = int(carry_amp.get())\n#c_width = int(carry_width.get())/100\n\n#plotting\nfig = plt.Figure(figsize=(12, 10))\nx = np.arange(0, 10, 0.01)\n\ndef s_ani(i):\n    if (max(np.sin((x-i/50.0)*s_frq)*s_amp) + signal_lvl.get()) < 99 and (min(np.sin((x+i/50.0)*s_frq)*s_amp) + signal_lvl.get()) > -99:\n        s_line.set_ydata(np.sin((x-i/50.0)*s_frq)*s_amp + signal_lvl.get())\n    elif signal_lvl.get() > 0:\n        s_line.set_ydata(np.sin((x-i/50.0)*s_frq)*s_amp + 99 - s_amp)\n    else:\n        s_line.set_ydata(np.sin((x-i/50.0)*s_frq)*s_amp - 99 + s_amp)\n    return s_line,\n\ndef c_ani(i):\n    c_line.set_ydata(np.sin((x-i/50.0)*c_frq)*c_amp)\n    return c_line,\n\ndef m_ani(i):\n\n    y_envelope_up = (np.sin((x-i/50.0)*s_frq)*s_amp)\n    y_envelope_down = (np.sin((x-i/50.0)*s_frq+np.pi)*s_amp)\n\n    if (max(y_envelope_up) + signal_lvl.get()) < 99 and (min(y_envelope_up) + signal_lvl.get()) > -99:\n        y_envelope_up = y_envelope_up + signal_lvl.get()\n        y_envelope_down = y_envelope_down - signal_lvl.get()\n    elif signal_lvl.get() > 0:\n        y_envelope_up = y_envelope_up + 99 - s_amp\n        y_envelope_down = y_envelope_down - 99 + s_amp\n    else:\n        y_envelope_up = y_envelope_up - 99 + s_amp\n        y_envelope_down = y_envelope_down + 99 - s_amp\n    \n    cm_line.set_ydata(np.sin((x-i/50.0)*c_frq)*c_amp*(y_envelope_up/(s_amp*2)))\n    sm_line.set_ydata(y_envelope_up)\n    #sm_line.set_color('red')\n    spm_line.set_ydata(y_envelope_down)\n    return cm_line, sm_line, spm_line#, subsignal_label\n\ndef normalize(ax):\n    ax.set_xlim(0, 10)\n    ax.set_ylim(-100, 100)\n    ax.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)\n    #ax.tick_params(axis='y', which='both', right=False, left=False, labelleft=False)\n    ax.spines['bottom'].set_position('center')\n    ax.spines['top'].set_visible(False)\n    ax.spines['right'].set_visible(False)\n    ax.xaxis.set_label_coords(0.95, 0.45) \n    ax.set_xlabel('время')\n    ax.set_ylabel('амплитуда')\n\ncanvas = FigureCanvasTkAgg(fig, master=plot_container)\ncanvas.get_tk_widget().grid(row=1,column=0)\n\ns_ax = fig.add_subplot(311)\nc_ax = fig.add_subplot(312)\nm_ax = fig.add_subplot(313)\nroot.update()\n\ny_signal = np.sin((x)*s_frq)*s_amp\ny_max = max(y_signal)\ny_carry = np.sin((x)*c_frq)*c_amp\ns_line, = s_ax.plot(x, y_signal, 'k')\nc_line, = c_ax.plot(x, y_carry)\n\ny_signal = (np.sin((x)*s_frq)*s_amp)+y_max\ny_max = max(y_signal)\ny_carry = y_carry*(y_signal/y_max)\ncm_line, = m_ax.plot(x, y_carry, 'k')\nspm_line, = m_ax.plot(x, y_signal, 'b', linewidth=2)\nsm_line, = m_ax.plot(x, y_signal, 'r', linewidth=2)\n\ns_ax.cla()\nc_ax.cla()\nm_ax.cla()\n\nnormalize(s_ax)\nnormalize(c_ax)\nnormalize(m_ax)\n\ns_ax.legend([s_line], ['Сигнал с fс'], loc = 'upper center', frameon=False)\nc_ax.legend([c_line], ['Несущее колебание c fн'], loc = 'upper center', frameon=False)\nm_ax.legend([cm_line, sm_line, spm_line], ['Амплитудно модулированный сигнал\\n(модулированное по амплитуде колебание)', 'Верхняя огибающая', 'Нижняя огибающая'], ncol = 3, loc = 'upper center', frameon=False)\n#c_repeatable_point = int(250 * c_frq / 10)\n\na1 = 
animation.FuncAnimation(fig, s_ani, np.arange(1, 315), interval=20, blit=True)\na2 = animation.FuncAnimation(fig, c_ani, np.arange(1, 127), interval=20, blit=True)\na3 = animation.FuncAnimation(fig, m_ani, np.arange(1, 315), interval=20, blit=True)\nroot.mainloop()","sub_path":"AM.py","file_name":"AM.py","file_ext":"py","file_size_in_byte":8180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"307307271","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n# Created on 2017-04-27 00:33:51\n# Project: lyric2\n\nfrom pyspider.libs.base_handler import *\n\n\nclass Handler(BaseHandler):\n crawl_config = {\n }\n\n @every(minutes=24 * 60)\n def on_start(self):\n\n list = ['http://music.baidu.com/search?key=%E7%8E%8B%E5%8A%9B%E5%AE%8F','http://music.baidu.com/search/song?s=1&key=%E5%91%A8%E6%9D%B0%E4%BC%A6','http://music.baidu.com/search?key=%E6%BD%98%E7%8E%AE%E6%9F%8F','http://music.baidu.com/search?key=%E6%9E%97%E4%BF%8A%E6%9D%B0']\n\n for i in list:\n self.crawl(i, callback=self.index_page,fetch_type='js')\n\n @config(age=10 * 24 * 60 * 60)\n def index_page(self, response):\n for each in response.doc('span.song-title > a').items():\n self.crawl(each.attr.href,fetch_type='js',callback=self.detail_page)\n print(\"抓了一首歌\")\n for each in response.doc('a.page-navigator-next').items():\n self.crawl(each.attr.href,fetch_type='js',callback=self.index_page)\n\n\n\n @config(priority=2)\n def detail_page(self, response):\n a = 'div.song-info > div.info-holder.clearfix > ul > li:nth-child(2) > a'\n if len(response.doc(a).text()) <1:\n a = 'div.song-info > div.info-holder.clearfix > ul > li:nth-child(3) > a'\n\n if len(response.doc('div.song-info > div.play-holder.clearfix > div > h2').text())>0:\n return {\n \"alblum\": response.doc(a).text(),\n \"titles\": response.doc('div.song-info > div.play-holder.clearfix > div > h2').text(),\n \"content\": response.doc('#lyricCont').text(),\n \"author\":(response.doc('.author_list').text()).split()[0],\n }\n\n def index_page2(self, response):\n return \"我被执行了\"\n","sub_path":"05.lyric_analysis/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"351499815","text":"import asyncio\nimport functools\nimport uuid\nfrom typing import Callable\n\nimport httpx\n\nfrom prefect_server.configuration import config\nfrom prefect_server.utilities.logging import get_logger\n\nsens_o_matic_httpx_client = httpx.AsyncClient()\n\n__all__ = (\"emit_delete_event\", \"register_delete\", \"sens_o_matic_httpx_client\")\n\n\nasync def emit_delete_event(row_id: str, table_name: str) -> dict:\n env = config.env\n logger = get_logger(\"sens-o-matic\")\n\n if env == \"local\":\n return None\n\n try:\n event_id = str(uuid.uuid4())\n payload = {\"cloud_environment\": env, \"row_id\": row_id, \"table_name\": table_name}\n event = {\n \"id\": event_id,\n \"source\": \"prefect_server\",\n \"type\": \"delete\",\n \"payload\": payload,\n }\n result = await sens_o_matic_httpx_client.post(\n \"https://sens-o-matic.prefect.io/\",\n json=event,\n headers={\"X-PREFECT-EVENT\": \"prefect_server-0.0.1\"},\n timeout=10,\n )\n logger.debug(\"Delete event sent to sens-o-matic: %s\", str(result))\n except Exception as e:\n # Log the information that we were trying to send to sens-o-matic so Dylan can backfill\n logger.error(\n \"Error during attempt to send event to sens-o-matic: %s\", str(event)\n )\n logger.error(e)\n\n 
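# Note: the constructed event dict is returned even when sending fails, so callers can inspect or re-log the payload.\n    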
return event\n\n\ndef register_delete(table_name: str, id_key: str) -> Callable:\n \"\"\"\n Decorator for a graphql resolver for automatically emitting delete events to the sens-o-matic.\n\n Parameters\n ----------\n table_name : str\n The name of the table from which a row was just deleted\n id_key : str\n The key in the GraphQL input argument containing the id field for the deleted row\n \"\"\"\n\n def decorator(func):\n @functools.wraps(func)\n async def wrapper(obj, info, input):\n result = await func(obj, info, input)\n successful = result.get(\"success\", False)\n\n if not successful:\n return result\n\n deleted_row_id = input.get(id_key, None)\n\n asyncio.create_task(\n emit_delete_event(row_id=deleted_row_id, table_name=table_name)\n )\n\n return result\n\n return wrapper\n\n return decorator\n","sub_path":"src/prefect_server/utilities/sens_o_matic_events.py","file_name":"sens_o_matic_events.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"367229205","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport struct\n\nfrom neutron.agent.common import utils\nfrom oslo_config import cfg\nfrom oslo_log import log\nfrom ryu.lib import addrconv\n\nfrom dragonflow._i18n import _LE\nfrom dragonflow.common import exceptions\nfrom dragonflow.controller.common import constants as const\nfrom dragonflow.controller.common import cookies\n\nLOG = log.getLogger(__name__)\n\n_aging_cookie = 0\nACTIVE_PORT_DETECTION_APP = \\\n \"active_port_detection_app.ActivePortDetectionApp\"\n\n\nAGING_COOKIE_NAME = 'aging'\nAGING_COOKIE_LEN = 1\n\n\ndef ipv4_text_to_int(ip_text):\n try:\n return struct.unpack('!I', addrconv.ipv4.text_to_bin(ip_text))[0]\n except Exception:\n raise exceptions.InvalidIPAddressException(key=ip_text)\n\n\ndef ipv6_text_to_short(ip_text):\n if isinstance(ip_text, unicode):\n ip_text = ip_text.encode('ascii', 'ignore')\n try:\n return list(struct.unpack('!8H', addrconv.ipv6.text_to_bin(ip_text)))\n except Exception:\n raise exceptions.InvalidIPAddressException(key=ip_text)\n\n\ndef set_aging_cookie(c):\n global _aging_cookie\n _aging_cookie = c\n\n\ndef get_aging_cookie():\n return _aging_cookie\n\n\ndef set_aging_cookie_bits(old_cookie, old_cookie_mask):\n return cookies.get_cookie(AGING_COOKIE_NAME, _aging_cookie,\n old_cookie, old_cookie_mask)\n\n\ndef get_xor_cookie(cookie):\n return cookie ^ const.GLOBAL_INIT_AGING_COOKIE\n\n\ndef check_active_port_detection_app():\n apps_list = cfg.CONF.df.apps_list\n if ACTIVE_PORT_DETECTION_APP in apps_list:\n return True\n return False\n\n\ndef delete_conntrack_entries_by_filter(ethertype='IPv4', protocol=None,\n nw_src=None, nw_dst=None, zone=None):\n cmd = ['conntrack', '-D']\n if protocol:\n cmd.extend(['-p', str(protocol)])\n cmd.extend(['-f', ethertype.lower()])\n if nw_src:\n cmd.extend(['-s', nw_src])\n if nw_dst:\n cmd.extend(['-d', nw_dst])\n if zone:\n cmd.extend(['-w', str(zone)])\n\n try:\n utils.execute(cmd, 
run_as_root=True, check_exit_code=True,\n                      extra_ok_codes=[1])\n        LOG.debug(\"Successfully executed conntrack command %s\", cmd)\n    except RuntimeError:\n        LOG.exception(_LE(\"Failed to execute conntrack command %s\"), cmd)\n","sub_path":"dragonflow/controller/common/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"203389789","text":"# 11. Replace tabs with spaces\n\n# Replace each tab character with a single space.\n# Use the sed, tr, or expand command to verify the result.\n\nimport os.path\n\ndef make_file_tab_replaced(in_file, out_file):\n    with open(in_file, \"r\", encoding=\"utf-8\") as in_f, \\\n            open(out_file, \"w\", encoding=\"utf-8\") as out_f:\n        for l in in_f.readlines():\n            out_f.write(l.replace(\"\\t\", \" \"))\n\nif __name__ == \"__main__\":\n    in_file = os.path.join(os.path.dirname(__file__), '../DataSource/hightemp.txt')\n    out_file = os.path.join(os.path.dirname(__file__), '../Output/Chapter2/q11.txt')\n    make_file_tab_replaced(in_file, out_file)","sub_path":"chapter2/q11.py","file_name":"q11.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"395173269","text":"\"\"\"\n.. _ex-report:\n\n================================\nMake an MNE-Report with a Slider\n================================\n\nIn this example, MEG evoked data are plotted in an HTML slider.\n\"\"\"\n\n# Authors: Teon Brooks \n#          Eric Larson \n#\n# License: BSD (3-clause)\n\nfrom mne.report import Report\nfrom mne.datasets import sample\nfrom mne import read_evokeds\nfrom matplotlib import pyplot as plt\n\n\ndata_path = sample.data_path()\nmeg_path = data_path + '/MEG/sample'\nsubjects_dir = data_path + '/subjects'\nevoked_fname = meg_path + '/sample_audvis-ave.fif'\n\n###############################################################################\n# Do standard folder parsing (this can take a couple of minutes):\n\nreport = Report(image_format='png', subjects_dir=subjects_dir,\n                info_fname=evoked_fname, subject='sample',\n                raw_psd=False)  # use False for speed here\nreport.parse_folder(meg_path, on_error='ignore', mri_decim=10)\n\n###############################################################################\n# Add a custom section with an evoked slider:\n\n# Load the evoked data\nevoked = read_evokeds(evoked_fname, condition='Left Auditory',\n                      baseline=(None, 0), verbose=False)\nevoked.crop(0, .2)\ntimes = evoked.times[::4]\n# Create a list of figs for the slider\nfigs = list()\nfor t in times:\n    figs.append(evoked.plot_topomap(t, vmin=-300, vmax=300, res=100,\n                                    show=False))\n    plt.close(figs[-1])\nreport.add_slider_to_section(figs, times, 'Evoked Response',\n                             image_format='png')  # can also use 'svg'\n\n# Save the report\nreport.save('my_report.html', overwrite=True)\n","sub_path":"0.22/_downloads/6667cd965941b00682e55684a77ea24b/plot_make_report.py","file_name":"plot_make_report.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"81436242","text":"#!/usr/bin/env python\n\n# from pulp import *\nimport itertools\nimport pulp\n\nimport drfh.common\nimport drfh.solution\n\n\ndef _integerify_matrix(origin_matrix):\n    nrow, ncol = len(origin_matrix), len(origin_matrix[0])\n    for i, j in itertools.product(range(nrow), range(ncol)):\n        origin_matrix[i][j] = int(pulp.value(origin_matrix[i][j]))\n    return origin_matrix\n\n\ndef _lpvariable_matrix(nrow, ncol):\n    raw_matrix = 
drfh.common.new_matrix(nrow, ncol)\n for i, j in itertools.product(range(nrow), range(ncol)):\n name = \"var:{}:{}\".format(i, j)\n raw_matrix[i][j] = pulp.LpVariable(name, 0, None, cat=\"Integer\")\n return raw_matrix\n\n\ndef ilp_solve(problem):\n \"\"\"\n solve the problem using linear-programing method in the integer space\n\n args:\n problem: a Problem instance\n\n return:\n a Solution instance to the problem\n \"\"\"\n # step 1 declare variables\n resource_matrix = problem.resource_matrix\n demand_matrix = problem.demand_matrix\n host_num = problem.host_num\n user_num = problem.user_num\n resource_num = problem.resource_num\n dominant_vector = problem.dominant_vector\n\n # step 2 define the linear programmming problem\n # 2.1 declare variables\n x_matrix = _lpvariable_matrix(user_num, host_num)\n y_aux = pulp.LpVariable(\"y\", 0, None)\n # 2.2 define subject\n prob = pulp.LpProblem(\"lpn_solve\", pulp.LpMaximize)\n prob += y_aux\n # 2.3 define constraints\n # (1) sum(Rx) <= C\n for n, m in itertools.product(range(host_num), range(resource_num)):\n left = [demand_matrix[j][m] * x_matrix[j][n] for j in range(user_num)]\n prob += pulp.lpSum(left) <= resource_matrix[n][m]\n # (2) sum(u[j, n] * x[j, n]) >= y_aux\n for j in range(user_num): # j for user\n left = [dominant_vector[j] * x_matrix[j][n] for n in range(host_num)]\n prob += pulp.lpSum(left) >= y_aux\n\n # step 3 solve the problem\n prob.solve()\n\n return drfh.solution.Solution(_integerify_matrix(x_matrix))\n","sub_path":"drfh/solver/ilp.py","file_name":"ilp.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"16016494","text":"'''\r\nWrite a program that checks whether a binary tree is symmetric.\r\nHint: The definition of symmetry is recursive.\r\n'''\r\n\r\nclass BinaryTreeNode:\r\n def __init__(self, data=None, left=None, right=None):\r\n self.data = data\r\n self.left = left\r\n self.right = right\r\n\r\ntree = BinaryTreeNode(314, \r\n BinaryTreeNode(6, None, BinaryTreeNode(2, None, BinaryTreeNode(3))), \r\n BinaryTreeNode(6, BinaryTreeNode(2, BinaryTreeNode(3), None), None)) \r\n\r\ntree1 = BinaryTreeNode(314, \r\n BinaryTreeNode(6, None, BinaryTreeNode(1, None, BinaryTreeNode(3))), \r\n BinaryTreeNode(6, BinaryTreeNode(2, BinaryTreeNode(3), None), None))\r\n\r\ntree2 = BinaryTreeNode(314, \r\n BinaryTreeNode(6, None, BinaryTreeNode(2, None, BinaryTreeNode(3, None, BinaryTreeNode(561)))), \r\n BinaryTreeNode(6, BinaryTreeNode(2, BinaryTreeNode(3), None), None))\r\n\r\n'''\r\nMy solution: call preOrderTraverse on tree's left subtree. Store their values and directions\r\ncall reversePreOrderTraverse (right first) on tree's right subtree. 
And store their values.\r\nThen compare the left_half values/directions with the right_half values/directions:\r\nthe values should match, but the directions should be opposite.\r\n\r\nTime-complexity: O(n), Space-complexity: O(n)\r\n'''\r\n\r\n'''\r\nimport collections\r\ndef is_symmetric(tree):\r\n    left_half, right_half = [], []\r\n    NodeValWithDirection = collections.namedtuple('NodeValWithDirection', ('value', 'direction'))\r\n    \r\n    def preOrderTraverse(node, direction, storage):\r\n        if not node:\r\n            return\r\n        \r\n        storage.append(NodeValWithDirection(node.data, direction))\r\n        preOrderTraverse(node.left, 'left', storage)\r\n        preOrderTraverse(node.right, 'right', storage)\r\n        return\r\n    \r\n    def reversePreOrderTraverse(node, direction, storage):\r\n        if not node:\r\n            return\r\n        \r\n        storage.append(NodeValWithDirection(node.data, direction))\r\n        # recurse with the reversed traversal, not the forward one\r\n        reversePreOrderTraverse(node.right, 'right', storage)\r\n        reversePreOrderTraverse(node.left, 'left', storage)\r\n        return\r\n\r\n    preOrderTraverse(tree.left, 'left', left_half)\r\n    reversePreOrderTraverse(tree.right, 'right', right_half)\r\n\r\n    if len(left_half) != len(right_half):\r\n        return False\r\n\r\n    for i in range(len(left_half)):\r\n        if left_half[i].value != right_half[i].value or left_half[i].direction == right_half[i].direction:\r\n            return False\r\n    \r\n    return True\r\n'''\r\n\r\n'''\r\nEPI Solutions:\r\nApproach: We can swap the tree's left and right subtree and just do a regular DFS.\r\nCompare the values and see if they match. The time-complexity and space-complexity are O(n).\r\n\r\nBut a slightly better algorithm does not need to construct the mirror subtree.\r\nWe only care about whether a pair of subtrees are mirror images. As soon as a pair fails the\r\ntest, we can short circuit the check to false.\r\n\r\nThe time complexity and space complexity are O(n) and O(h), respectively, where n is the number of nodes\r\nand h is the height of the tree.\r\n'''\r\n\r\ndef is_symmetric(tree):\r\n    def check_symmetric(subtree_0, subtree_1):\r\n        if not subtree_0 and not subtree_1:\r\n            return True\r\n        elif subtree_0 and subtree_1:\r\n            return (subtree_0.data == subtree_1.data\r\n                    and check_symmetric(subtree_0.left, subtree_1.right)\r\n                    and check_symmetric(subtree_0.right, subtree_1.left))\r\n        # One subtree is empty and the other is not.\r\n        return False\r\n    return not tree or check_symmetric(tree.left, tree.right)\r\nprint(is_symmetric(tree2))","sub_path":"Python/EPI/Binary Trees/is_symmetric.py","file_name":"is_symmetric.py","file_ext":"py","file_size_in_byte":3532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"466629231","text":"from selenium import webdriver\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\nimport re\r\nimport os\r\n\r\n# url = 'https://unsplash.com/'\r\n\r\n# options = webdriver.ChromeOptions()\r\n# prefs = {'profile.default_content_settings.popups': 0, 'download.default_directory': r'C:/Users/COLDPLAY/Desktop/wallpaper'}\r\n# options.add_experimental_option('prefs', prefs)\r\n\r\n# driver = webdriver.Chrome(chrome_options=options)\r\n# driver.get('http://www.baidu.com')\r\n# driver = webdriver.PhantomJS()\r\n# driver.get(url)\r\n\r\n# def execute_times(times):\r\n# \tfor i in range(times + 1):\r\n# \t\tdriver.execute_script('window.scrollTo(0, document.body.scrollHeight);')\r\n\r\n# execute_times(10)\r\n\r\n# html = driver.page_source\r\n# links = driver.find_elements_by_link_text('DOWNLOAD')\r\n# for link in links:\r\n# \tlink.click()\r\n\r\n# print(html)\r\n# soup = BeautifulSoup(html, 'lxml')\r\n# links = 
soup.find_all('a', 'title=\"Download photo\"')\r\n\r\n# for link in links:\r\n# \tprint(link['href'])\r\n\r\n# driver = webdriver.PhantomJS()\r\n# index = 'https://unsplash.com/collections/curated/'\r\n\r\n# page = int(input('我们从哪一页开始:'))\r\n# while page <= 148:\r\n# \tprint('正在下载第' + str(page) + '页的壁纸...')\r\n# \turl = index + str(page)\r\n\r\n# \tdriver.get(url)\r\n# \tlinks = driver.find_elements_by_link_text('DOWNLOAD')\r\n# \tfor link in links:\r\n# \t\tlink.click()\r\n# \t# driver.close()\r\n\r\n# \tpage += 1\r\n\r\nindex = 'https://unsplash.com/collections/curated/'\r\ndriver = webdriver.PhantomJS()\r\n\r\npage = int(input('哪一页开始: '))\r\n\r\nwhile page <= 148:\r\n\tprint('正在下载第' + str(page) + ' 页的壁纸...')\r\n\r\n\turl = index + str(page)\r\n\tdriver.get(url)\r\n\thtml = driver.page_source\r\n\t# with open(r'C:/Users/COLDPLAY/Desktop/wallpaper/lol.jpg', 'wb') as f:\r\n\t# \tf.write(r.content)\r\n\tsoup = BeautifulSoup(html, 'lxml')\r\n\tlinks = soup.find_all('a', title=\"Download photo\")\r\n\r\n\tcount = 1\r\n\tfor link in links:\r\n\t\tlink = link['href']\r\n\r\n\t\tprint('正在下载第' + str(count) + ' 张壁纸...')\r\n\r\n\t\tif os.path.exists('C:/Users/COLDPLAY/Desktop/wallpaper/' + str(page) + '_' + str(count) + '.jpg'):\r\n\t\t\tprint('图片已存在!')\r\n\t\t\tcount += 1\r\n\t\t\tcontinue\r\n\r\n\t\tir = requests.get(link)\r\n\r\n\t\tif ir.status_code == 200:\r\n\t\t\twith open('C:/Users/COLDPLAY/Desktop/wallpaper/' + str(page) + '_' + str(count) + '.jpg', 'wb') as f:\r\n\t\t\t\tf.write(ir.content)\r\n\t\t\tprint('下载完成!')\r\n\t\telse:\r\n\t\t\tprint('下载失败!')\r\n\t\tcount += 1\r\n\r\n\tpage += 1","sub_path":"ProjectCode/wallpaper_spider2.0.py","file_name":"wallpaper_spider2.0.py","file_ext":"py","file_size_in_byte":2406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"197777246","text":"# -*- coding: utf-8 -*-\n\nfrom base import BaseDisambiguator\nfrom . 
import register_disambiguator\n\n\nclass EntityPriorDisambiguator(BaseDisambiguator):\n    def __init__(self, *args, **kwargs):\n        super(EntityPriorDisambiguator, self).__init__(*args, **kwargs)\n\n    def get_aliases_with_scores(self, mention, document):\n        scores = []\n\n        for alias in mention.candidates:\n            try:\n                entity = self.dictionary.get_entity(alias.title)\n                scores.append(entity.doc_count)\n\n            except KeyError:\n                scores.append(0)\n\n        return sorted(zip(mention.candidates, scores), key=lambda o: o[1],\n                      reverse=True)\n\n\nregister_disambiguator(EntityPriorDisambiguator)\n","sub_path":"web/entity-disambi/entity_disambi/disambiguator/entity_prior.py","file_name":"entity_prior.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"355871744","text":"import json\r\n\r\nfrom overwatch import OverwatchCrawler\r\n\r\nif __name__ == '__main__':\r\n    crawler = OverwatchCrawler()\r\n    heroes = crawler.get_hero_details()\r\n    heroes = json.dumps(heroes, ensure_ascii=False)\r\n    with open('./heroes.json', 'w', encoding='utf-8') as f:\r\n        f.write(heroes)\r\n    crawler.finish()\r\n","sub_path":"overwatch_crawler/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"463307557","text":"#!/usr/bin/env python3\n\nimport os\nimport json\n\nimport flask\nfrom flask import Flask\nfrom flask import jsonify\nfrom flask import request\nfrom flask import redirect\n\nfrom pprint import pprint\n#import sockjs_flask\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef hello():\n    return \"Hello World!\"\n\n\n@app.route('/api/entitlements/v1/services')\ndef services():\n    with open('api_payloads/services.json', 'r') as f:\n        data = json.loads(f.read())\n    return jsonify(data)\n\n\n@app.route('/api/rbac/v1/access/')\ndef rbac_access():\n    with open('api_payloads/rbac.json', 'r') as f:\n        data = json.loads(f.read())\n    return jsonify(data)\n\n\n@app.route('/api/tower-analytics/authorized/')\n@app.route('/api/tower-analytics/v0/authorized/')\ndef api_aa_authorized():\n    return jsonify({'msg': 'Authorized'})\n\n\n@app.route('/api/tower-analytics/chart30/')\n@app.route('/api/tower-analytics/v0/chart30/')\ndef api_aa_chart30():\n    with open('api_payloads/chart30.json', 'r') as f:\n        data = json.loads(f.read())\n    return jsonify(data)\n\n\n@app.route('/api/tower-analytics/clusters/')\n@app.route('/api/tower-analytics/v0/clusters/')\ndef api_aa_clusters():\n    with open('api_payloads/clusters.json', 'r') as f:\n        data = json.loads(f.read())\n    return jsonify(data)\n\n\n@app.route('/api/tower-analytics/templates/')\n@app.route('/api/tower-analytics/v0/templates/')\ndef api_aa_templates():\n    with open('api_payloads/templates.json', 'r') as f:\n        data = json.loads(f.read())\n    return jsonify(data)\n\n\n@app.route('/api/tower-analytics/template_jobs/<jobid>/')\n@app.route('/api/tower-analytics/v0/template_jobs/<jobid>/')\ndef api_aa_template_jobs(jobid=None):\n    with open('api_payloads/template_jobs.json', 'r') as f:\n        data = json.loads(f.read())\n    return jsonify(data)\n\n\n@app.route('/api/tower-analytics/modules/')\n@app.route('/api/tower-analytics/v0/modules/')\ndef api_aa_modules():\n    with open('api_payloads/modules.json', 'r') as f:\n        data = json.loads(f.read())\n    return jsonify(data)\n\n\n@app.route('/api/tower-analytics/jobs_by_date_and_org_30/')\n@app.route('/api/tower-analytics/v0/jobs_by_date_and_org_30/')\ndef api_aa_jobs_by_date_and_org_30():\n    with 
open('api_payloads/jobs_by_date_and_org_30.json', 'r') as f:\n data = json.loads(f.read())\n return jsonify(data)\n\n\n@app.route('/api/tower-analytics/job_runs_by_org_30/')\n@app.route('/api/tower-analytics/v0/job_runs_by_org_30/')\ndef api_aa_job_runs_by_org_30():\n with open('api_payloads/job_runs_by_org_30.json', 'r') as f:\n data = json.loads(f.read())\n return jsonify(data)\n\n\n@app.route('/api/tower-analytics/job_events_by_org_30/')\n@app.route('/api/tower-analytics/v0/job_events_by_org_30/')\ndef api_aa_job_events_by_org_30():\n with open('api_payloads/job_events_by_org_30.json', 'r') as f:\n data = json.loads(f.read())\n return jsonify(data)\n\n\n@app.route('/api/tower-analytics/roi_templates/')\n@app.route('/api/tower-analytics/v0/roi_templates/')\ndef api_aa_roi_templates():\n with open('api_payloads/roi_templates.json', 'r') as f:\n data = json.loads(f.read())\n return jsonify(data)\n\n\n@app.route('/api/tower-analytics/notifications/')\n@app.route('/api/tower-analytics/v0/notifications/')\ndef api_aa_notificaitons():\n with open('api_payloads/notifications.json', 'r') as f:\n data = json.loads(f.read())\n return jsonify(data)\n\n\n@app.route('/api/tower-analytics/v1/job_explorer/', methods=['GET', 'POST'])\ndef je():\n with open('api_payloads/job_explorer_result.json', 'r') as f:\n data = json.loads(f.read())\n return jsonify(data)\n\n\n@app.route('/api/tower-analytics/v1/job_explorer_options/', methods=['GET', 'POST'])\ndef je_options():\n with open('api_payloads/job_explorer_options_result.json', 'r') as f:\n data = json.loads(f.read())\n return jsonify(data)\n\n\nif __name__ == '__main__':\n #app.run(ssl_context='adhoc', host='0.0.0.0', port=443, debug=True)\n #server = Server(('0.0.0.0', 443), app.wsgi_app)\n #server.serve_forever()\n #app.run(ssl_context=('cert.pem', 'key.pem'), host='0.0.0.0', port=443, debug=True)\n if os.environ.get('API_SECURE'):\n app.run(ssl_context='adhoc', host='0.0.0.0', port=443, debug=True)\n else:\n app.run(host='0.0.0.0', port=5000, debug=True)\n","sub_path":"srv/aa_backend_mock/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":4276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"362815583","text":"import os, re, sys, time\n\nclass ListProcessor(object):\n def __init__(self, list_name):\n assert list_name\n self.__list_name = list_name\n\n @property\n def __database(self):\n if not hasattr(self, '__database__'):\n self.__database__ = self.__get_database()\n return self.__database__\n\n @property\n def __database_file_path(self):\n if not hasattr(self, '__database_file_path__'):\n self.__database_file_path__ = self.__get_database_file_path()\n return self.__database_file_path__\n\n @property\n def __timestamp(self):\n if not hasattr(self, '__timestamp__'):\n self.__timestamp__ = int(time.time())\n return self.__timestamp__\n\n @property\n def __valid_buckets(self):\n if not hasattr(self, '__valid_buckets__'):\n self.__valid_buckets__ = self.__get_valid_buckets()\n return self.__valid_buckets__\n\n def process(self, *args):\n self.__ensure_database_exists()\n if not args:\n return self.__render()\n if not re.match(r'^[1-9][0-9]*(:[1-9][0-9]*)*$', args[0]):\n return self.__add(message=' '.join(args))\n index = int(args[0]) - 1\n if not args[1:]:\n return self.__render(index)\n operation = args[1]\n if operation in ['d', 'done']:\n return self.__done(index)\n elif operation in ['e', 'edit']:\n return self.__edit(index, ' '.join(args[2:]))\n elif operation in ['r', 
'remove']:\n            return self.__remove(index)\n        else:\n            return False\n\n    def __add(self, message=None):\n        if not message:\n            return False\n        datum = {\n            'created_timestamp': self.__timestamp,\n            'updated_timestamp': self.__timestamp,\n            'message': message,\n        }\n        self.__database.setdefault('a', []).append(datum)\n        return self.__write_database()\n\n    def __done(self, index=0):\n        if index < 0:\n            return False\n        datum = self.__database['a'].pop(index)\n        datum['updated_timestamp'] = self.__timestamp\n        self.__database.setdefault('d', []).append(datum)\n        return self.__write_database()\n\n    def __edit(self, index=0, message=None):\n        if index < 0 or not message:\n            return False\n        datum = self.__database['a'][index]\n        datum['updated_timestamp'] = self.__timestamp\n        datum['message'] = message\n        return self.__write_database()\n\n    def __ensure_database_exists(self):\n        database_file_path = self.__database_file_path\n        database_dirname = os.path.dirname(database_file_path)\n        if database_dirname and not os.path.isdir(database_dirname):\n            os.makedirs(database_dirname)\n        if not os.path.isfile(database_file_path):\n            open(self.__database_file_path, 'w').close()\n\n    def __get_bucket(self, bucket):\n        return self.__database.get(bucket, [])\n\n    def __get_database(self):\n        return self.__read_database()\n\n    def __get_database_file_path(self):\n        list_name = self.__list_name\n        env_var = '%s_DATABASE' % list_name\n        if os.getenv(env_var):\n            return os.getenv(env_var)\n        if os.path.isfile(list_name):\n            return list_name\n        return os.path.join(\n            os.getenv('HOME'),\n            'var',\n            'db',\n            list_name\n        )\n\n    def __get_valid_buckets(self):\n        return [\n            'a',\n            'd',\n            'r',\n        ]\n\n    def __read_database(self):\n        database = {}\n        with open(self.__database_file_path) as database_file:\n            for line in database_file:\n                line_parts = line.rstrip(\"\\r\\n\").split(\"\\t\")\n                if not line_parts:\n                    continue\n                bucket = line_parts[0]\n                if bucket not in self.__valid_buckets:\n                    continue\n                created_timestamp = line_parts[1]\n                if not created_timestamp:\n                    continue\n                updated_timestamp = line_parts[2]\n                if not updated_timestamp:\n                    continue\n                message = line_parts[3]\n                if not message:\n                    continue\n                datum = {\n                    'created_timestamp': int(created_timestamp),\n                    'updated_timestamp': int(updated_timestamp),\n                    'message': message,\n                }\n                database.setdefault(bucket, []).append(datum)\n        return database\n\n    def __remove(self, index=0):\n        if index < 0:\n            return False\n        datum = self.__database['a'].pop(index)\n        datum['updated_timestamp'] = self.__timestamp\n        self.__database.setdefault('r', []).append(datum)\n        return self.__write_database()\n\n    def __render(self, index=None):\n        data = self.__get_bucket('a')\n        if not data:\n            print('No results')\n            return True\n        for datum_index, datum in enumerate(data):\n            if index is None or datum_index == index:\n                print('%3d. 
%s' % (datum_index + 1, datum['message']))\n return True\n\n def __write_database(self):\n with open(self.__database_file_path, 'w') as database_file:\n for bucket, data in self.__database.items():\n for datum in data:\n database_file.write(\"%s\\t%d\\t%d\\t%s%s\" % (\n bucket,\n datum['created_timestamp'],\n datum['updated_timestamp'],\n datum['message'],\n os.linesep\n ))\n return True\n\n\nif __name__ == '__main__':\n list_name = os.getenv('LIST_NAME')\n processor = ListProcessor(list_name)\n result = processor.process(*sys.argv[1:])\n sys.exit(0 if result else 1)\n","sub_path":"home/lib/python/utilities/list_processor.py","file_name":"list_processor.py","file_ext":"py","file_size_in_byte":5899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"213904225","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jun 26 11:58:58 2020\r\n\r\n@author: Danny Lema\r\n\"\"\"\r\n\r\nx=int(input(\"ingrese el numero a contar:. \"))\r\ncontador=1\r\nacumulador=0\r\nwhile True:\r\n print (contador)\r\n acumulador+=contador\r\n contador+=1\r\n if contador>x:\r\n break\r\nprint(\"la suma de los numeros es: \", acumulador)\r\nprint(\"El promedio de la suma es:.\", acumulador/x)","sub_path":"whileinfinito.py","file_name":"whileinfinito.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"380993800","text":"__author__ = 'junhyeok'\n\nimport numpy as np\nfrom gym import utils\nfrom gym.envs.dart import dart_env\nimport joblib\nimport os\nimport time\n\nclass DartDracoEnv(dart_env.DartEnv, utils.EzPickle):\n def __init__(self):\n self.control_bounds = np.array([[1.0] * 10, [-1.0] * 10])\n self.action_scale = np.array([150, 150, 150, 150, 150,\n 150, 150, 150, 150, 150])\n obs_dim = 16\n\n ground_path = '/Users/junhyeokahn/Repository/PnC/RobotSystem/RobotModel/Ground/ground_terrain.urdf'\n urdf_path = '/Users/junhyeokahn/Repository/PnC/RobotSystem/RobotModel/Robot/Draco/DracoCollision.urdf'\n # dart_env.DartEnv.__init__(self, [ground_path, urdf_path], 30, obs_dim, self.control_bounds, dt=0.001, disableViewer=False)\n dart_env.DartEnv.__init__(self, [ground_path, urdf_path], 1, obs_dim, self.control_bounds, dt=0.001, disableViewer=False)\n\n # collision detector\n try:\n self.dart_world.set_collision_detector(3)\n except Exception as e:\n print('Does not have ODE collision detector, reverted to bullet collision detector')\n self.dart_world.set_collision_detector(2)\n\n # collision detector\n self.dart_world.set_gravity(np.array([0., 0., -9.8]))\n\n utils.EzPickle.__init__(self)\n\n def advance(self, a):\n clamped_control = np.array(a)\n for i in range(len(clamped_control)):\n if clamped_control[i] > self.control_bounds[0][i]:\n clamped_control[i] = self.control_bounds[0][i]\n if clamped_control[i] < self.control_bounds[1][i]:\n clamped_control[i] = self.control_bounds[1][i]\n tau = np.zeros(self.robot_skeleton.ndofs)\n tau[6:] = clamped_control * self.action_scale\n\n self.do_simulation(tau, self.frame_skip)\n\n def _step(self, a):\n pre_state = [self.state_vector()]\n self.advance(a)\n\n alive_bonus = 1.0\n reward = alive_bonus\n\n s = self.state_vector()\n done = not np.isfinite(s).all()\n ob = self._get_obs()\n return ob, reward, done, {}\n\n def _get_obs(self):\n state = np.concatenate([\n self.robot_skeleton.q,\n self.robot_skeleton.dq,\n ])\n return state\n\n def reset_model(self):\n self.dart_world.reset()\n qpos = self.robot_skeleton.q + 
self.np_random.uniform(low=-.005, high=.005, size=self.robot_skeleton.ndofs)\n        qvel = self.robot_skeleton.dq + self.np_random.uniform(low=-.005, high=.005, size=self.robot_skeleton.ndofs)\n        print(qpos)\n        qpos[2] = 1.4\n\n        self.set_state(qpos, qvel)\n\n        state = self._get_obs()\n\n        return state\n\n    def viewer_setup(self):\n        if not self.disableViewer:\n            self._get_viewer().scene.tb.trans[2] = -5.5\n            self._get_viewer().scene.tb.theta = 60\n            self._get_viewer().scene.tb.phi = 90\n","sub_path":"dart-env/gym/envs/dart/draco.py","file_name":"draco.py","file_ext":"py","file_size_in_byte":2931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"21180987","text":"# Program that tells your month of birth\r\n\r\nmeses = ('janeiro','fevereiro','março','abril','maio','junho','julho','agosto','setembro','outubro','novembro','dezembro')\r\nnasc = input('Digite a sua data de nascimento no formato DD-MM-AAAA: ')\r\n\r\n# The line below uses a slice, 3:5, meaning the slice grabs the data between positions 3 and 5.\r\n# Here the month part of the DD-MM-AAAA string is sliced, converted to an integer, and 1 is subtracted\r\n# so the correct month is shown to the user. That is why 'indice = int(nasc[3:5])-1' is used.\r\n# Remember that position 0 counts.\r\nindice = int(nasc[3:5])-1\r\nmes = meses[indice]\r\n\r\nprint('Você nasceu no mês de',mes)","sub_path":"Curso do Ivan Gomes/Básico/mes de nascimento.py","file_name":"mes de nascimento.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"210395443","text":"class Solution:\n    # @param {string} a a number\n    # @param {string} b a number\n    # @return {string} the result\n    def addBinary(self, a, b):\n        # Write your code here\n        \n        # a less efficient way\n        # if len(a)>len(b):\n        #     return self.addBinary(b,a)\n        # a=a[::-1]\n        # b=b[::-1]\n        # arr=[]\n        # c=0\n        # for i in range(len(a)):\n        #     s=int(a[i])+int(b[i])+c\n        #     arr.append(s&1)\n        #     c=s/2\n        # for i in range(len(a),len(b)):\n        #     s=int(b[i])+c\n        #     arr.append(s&1)\n        #     c=s/2\n        # if c:\n        #     arr.append(c)\n        # arr=arr[::-1]\n        # return ''.join([str(a) for a in arr])\n        \n        # a more efficient way\n        from collections import deque\n        if len(a)>len(b):\n            return self.addBinary(b,a)\n        c,dq=0,deque()\n        for i in range(len(a)-1,-1,-1):\n            s=int(a[i])+int(b[len(b)-len(a)+i])+c\n            dq.appendleft(str(s&1))\n            c=s/2\n        for i in range(len(b)-len(a)-1,-1,-1):\n            s=int(b[i])+c\n            dq.appendleft(str(s&1))\n            c=s/2\n        if c:\n            dq.appendleft(str(c))\n        return ''.join(dq)\n    \n    # Time: O(N), Space: O(N)\n","sub_path":"408.py","file_name":"408.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"608476695","text":"# coding=utf-8\nfrom selenium import webdriver\n\ndriver = webdriver.PhantomJS()\n\n# Set the window size\n# driver.set_window_size(1920,1080)\n# Maximize the window\ndriver.maximize_window()\n\ndriver.get(\"https://www.v2ex.com/go/python\")\n\n# Get the page source\n# print(driver.page_source)\nprint(driver.get_cookies())\nprint({i[\"name\"]:i[\"value\"] for i in driver.get_cookies()})\n# Take a page screenshot\ndriver.save_screenshot(\"./v3.png\")\ndriver.quit()","sub_path":"python_spider/code/02_try_phantonjs.py","file_name":"02_try_phantonjs.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"361549952","text":"from numpy import array, zeros\nfrom sympy 
import symbols, simplify, trigsimp, cos, sin\nfrom sympy.physics.mechanics import dynamicsymbols, ReferenceFrame, Point, inertia, Particle, KanesMethod, kinetic_energy, potential_energy\n\n#Sets up inertial frame as well as frames for each linkage\ninertial_frame = ReferenceFrame('I')\nleg_frame = ReferenceFrame('L')\nbody_frame = ReferenceFrame('B')\n\n#Sets up symbols for joint angles\ntheta1, theta2 = dynamicsymbols('theta1, theta2')\n\n#Orients the leg frame to the inertial frame by angle theta1\n#and the body frame to to the leg frame by angle theta2\nleg_frame.orient(inertial_frame, 'Axis', (theta1, inertial_frame.z))\nbody_frame.orient(leg_frame, 'Axis', (theta2, leg_frame.z))\n\n#Sets up points for the joints and places them relative to each other\nankle = Point('A')\nleg_length = symbols('l_L')\nwaist = Point('W')\nwaist.set_pos(ankle, leg_length*leg_frame.y)\nbody = Point('B')\nbody_length = symbols('l_B')\nbody.set_pos(waist, body_length*body_frame.y)\n\n#Sets up the angular velocities\nomega1, omega2 = dynamicsymbols('omega1, omega2')\n#Relates angular velocity values to the angular positions theta1 and theta2\nkinematic_differential_equations = [omega1 - theta1.diff(),\n omega2 - theta2.diff()]\n\n#Sets up the rotational axes of the angular velocities\nleg_frame.set_ang_vel(inertial_frame, omega1*inertial_frame.z)\nleg_frame.ang_vel_in(inertial_frame)\nbody_frame.set_ang_vel(leg_frame, omega2*inertial_frame.z)\nbody_frame.ang_vel_in(inertial_frame)\n\n#Sets up the linear velocities of the points on the linkages\nankle.set_vel(inertial_frame, 0)\nwaist.v2pt_theory(ankle, inertial_frame, leg_frame)\nwaist.vel(inertial_frame)\nbody.v2pt_theory(waist, inertial_frame, body_frame)\nbody.vel(inertial_frame)\n\n#Sets up the masses of the linkages\nleg_mass, body_mass = symbols('m_L, m_B')\n\n#Defines the linkages as particles\nwaistP = Particle('waistP', waist, leg_mass)\nbodyP = Particle('bodyP', body, body_mass)\n\n#Sets up gravity information and assigns gravity to act on mass centers\ng = symbols('g')\nleg_grav_force_vector = -1*leg_mass*g*inertial_frame.y\nleg_grav_force = (waist, leg_grav_force_vector)\nbody_grav_force_vector = -1*body_mass*g*inertial_frame.y\nbody_grav_force = (body,body_grav_force_vector)\n\n#Sets up joint torques\nankle_torque, waist_torque = dynamicsymbols('T_a, T_w')\nleg_torque_vector = ankle_torque*inertial_frame.z - waist_torque*inertial_frame.z\nleg_torque = (leg_frame, leg_torque_vector)\n\nbody_torque_vector = waist_torque*inertial_frame.z\nbody_torque = (body_frame, body_torque_vector)\n\n#Generalized coordinates\ncoordinates = [theta1, theta2]\n\n#Generalized speeds\nspeeds = [omega1, omega2]\n\n#Create a KanesMethod object\nkane = KanesMethod(inertial_frame, coordinates, speeds, kinematic_differential_equations)\n\nloads = [leg_grav_force,\n body_grav_force,\n leg_torque,\n body_torque]\nbodies = [waistP, bodyP]\n\nfr, frstar = kane.kanes_equations(loads, bodies)\nfrplusfrstar = simplify(trigsimp(fr + frstar))\nmass_matrix = simplify(trigsimp(kane.mass_matrix_full))\n\nforcing_vector = trigsimp(kane.forcing_full)\n\nconstants = [leg_length,\n leg_mass,\n body_length,\n body_mass,\n g]\n#Specified contains the matrix for the input torques\nspecified = [ankle_torque, waist_torque]\n\n#Specifies numerical constants for inertial/mass properties\n#Robot Params\n#numerical_constants = array([1.035, # leg_length[m]\n# 36.754, # leg_mass[kg]\n#\t\t\t 0.85, # body_length[m]\n# 91.61, # body_mass[kg]\n# 9.81] # acceleration due to gravity [m/s^2]\n# 
)\nnumerical_constants = array([0.75,\n 7.0,\n 0.5,\n 8.0,\n 9.81])\n\n#Set input torques to 0\nnumerical_specified = zeros(2)\n\nparameter_dict = dict(zip(constants, numerical_constants))\n\nke_energy = simplify(kinetic_energy(inertial_frame, waistP, bodyP).subs(parameter_dict))\n\nwaistP.set_potential_energy(leg_mass*g*leg_length*cos(theta1))\n\nbodyP.set_potential_energy(body_mass*g*(leg_length*cos(theta1)+body_length*cos(theta1+theta2)))\n\npe_energy = simplify(potential_energy(waistP, bodyP).subs(parameter_dict))\n","sub_path":"double_pendulum/double_pendulum_setup.py","file_name":"double_pendulum_setup.py","file_ext":"py","file_size_in_byte":4329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"85363561","text":"from bento.distutils.utils \\\n import \\\n _is_setuptools_activated\nif _is_setuptools_activated():\n from setuptools.command.install \\\n import \\\n install as old_install\nelse:\n from distutils.command.install \\\n import \\\n install as old_install\n\nfrom bento._config \\\n import \\\n IPKG_PATH\nfrom bento.installed_package_description \\\n import \\\n InstalledPkgDescription, iter_files\nfrom bento.compat.api \\\n import \\\n relpath\n\nfrom bento.commands.install \\\n import \\\n InstallCommand\nfrom bento.commands.context \\\n import \\\n CmdContext\nfrom bento.commands.options \\\n import \\\n OptionsContext\nfrom bento.commands.wrapper_utils \\\n import \\\n run_cmd_in_context\n\nclass install(old_install):\n def initialize_options(self):\n old_install.initialize_options(self)\n\n def finalize_options(self):\n old_install.finalize_options(self)\n\n def run(self):\n self.run_command(\"build\")\n dist = self.distribution\n\n run_cmd_in_context(InstallCommand, \"install\", [], CmdContext,\n dist.run_node, dist.top_node, dist.pkg)\n\n if self.record:\n self.write_record()\n\n def write_record(self):\n dist = self.distribution\n\n install = InstallCommand()\n options_context = OptionsContext.from_command(install)\n context = CmdContext([], options_context, dist.pkg, dist.run_node)\n if self.record:\n n = context.build_node.make_node(IPKG_PATH)\n ipkg = InstalledPkgDescription.from_file(n.abspath())\n scheme = context.get_paths_scheme()\n ipkg.update_paths(scheme)\n file_sections = ipkg.resolve_paths(src_root_dir=context.build_node.abspath())\n\n fid = open(self.record, \"w\")\n try:\n for kind, source, target in iter_files(file_sections):\n fid.write(\"%s\\n\" % target)\n finally:\n fid.close()\n","sub_path":"bento/distutils/commands/install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"393353139","text":"\nimport time\nimport ev3dev.ev3 as ev3\n\nimport mqtt_remote_method_calls as com\n\n\nclass Ev3delegate(object):\n \"\"\" This class is the same as robot_controller but is modified so that\n it does not interfere with other team member projects. This class is\n specifically for the robot, and therefore defines functions that\n control the robots actions. 
\"\"\"\n\n def __init__(self):\n\n # The code below connects all of the robots sensors and motors to\n # specific variables\n self.mqtt_client = None\n self.running = True\n self.left_motor = ev3.LargeMotor(ev3.OUTPUT_B)\n self.right_motor = ev3.LargeMotor(ev3.OUTPUT_C)\n self.arm_motor = ev3.MediumMotor(ev3.OUTPUT_A)\n self.touch_sensor = ev3.TouchSensor()\n self.color_sensor = ev3.ColorSensor()\n self.ir_sensor = ev3.InfraredSensor()\n self.pixy = ev3.Sensor(driver_name=\"pixy-lego\")\n\n # Then is assures that all sensors and motors are connected properly\n assert self.color_sensor\n assert self.left_motor.connected\n assert self.right_motor.connected\n assert self.arm_motor.connected\n assert self.ir_sensor.connected\n assert self.pixy\n\n def set_mqtt(self, mqtt_client):\n\n # This code establishes a single mqtt_client that can be called in\n # different function/ parts of code.\n self.mqtt_client = mqtt_client\n\n def stop_motors(self):\n self.left_motor.stop(stop_action='brake')\n self.right_motor.stop(stop_action='brake')\n\n def drive_inches(self, inches_target, speed_deg):\n\n # The following code was copied from the robot_controller\n inches_target = inches_target\n motor_turns = inches_target*90\n speed_deg = speed_deg\n\n self.right_motor.run_to_rel_pos(position_sp=motor_turns,\n speed_sp=speed_deg,\n stop_action='brake')\n self.left_motor.run_to_rel_pos(position_sp=motor_turns,\n speed_sp=speed_deg, stop_action='brake')\n self.left_motor.wait_while(ev3.Motor.STATE_RUNNING)\n\n def turn_degrees(self, degrees_to_turn, turn_speed_sp):\n\n # The following code was copied from the robot_controller\n self.left_motor.run_to_rel_pos(speed_sp=turn_speed_sp,\n position_sp=(-degrees_to_turn * 4),\n stop_action='brake')\n self.right_motor.run_to_rel_pos(speed_sp=turn_speed_sp,\n position_sp=degrees_to_turn*4,\n stop_action='brake')\n self.left_motor.wait_while(ev3.Motor.STATE_RUNNING)\n\n def drive_shapes(self, sides, fill_color, outline_color):\n # The code below was modified in order for the robot to drive the\n # number of sides that was passed in.\n\n print('made it to drive_shapes')\n # The following code makes the robot drive in a circle if the number\n # of sides is zero\n if sides == 0:\n self.left_motor.run_to_rel_pos(speed_sp=400, position_sp=(-360 *\n 4),\n stop_action='brake')\n self.right_motor.run_to_rel_pos(speed_sp=400,\n position_sp=360*4,\n stop_action='brake')\n self.left_motor.wait_while(ev3.Motor.STATE_RUNNING)\n else:\n turn_amount = 360/sides\n for k in range(sides):\n self.drive_inches(5, 500)\n self.turn_degrees(turn_amount, 500)\n\n # The code below assures that the robot has made it to the code and\n # sends a message back to the pc calling the things_to_draw function\n print('connecting to pc')\n self.mqtt_client.send_message('things_to_draw', [sides, fill_color,\n outline_color])\n\n def loop_forever(self, mqtt_client):\n self.running = True\n while self.running:\n time.sleep(0.1)\n\n # The following code exits the program on the ev3 side when the\n # touch sensor is pressed\n if self.touch_sensor.is_pressed:\n print(\"Goodbye!\")\n ev3.Sound.speak(\"Goodbye\").wait()\n mqtt_client.close()\n break\n\n # The following code stops the motors, beeps, and prints a message\n # when an object is detected within a certain range\n if self.ir_sensor.proximity < 10:\n self.stop_motors()\n ev3.Sound.beep().wait()\n print('Cannot complete drawing')\n time.sleep(1.5)\n time.sleep(0.1)\n\n\ndef main():\n robot = Ev3delegate() # This code establishes the robot as an 
Ev3delegate\n    mqtt_client = com.MqttClient(robot)  # This code constructs a mqtt client\n    robot.set_mqtt(mqtt_client)\n    mqtt_client.connect_to_pc()  # This code connects the robot to the pc\n    robot.loop_forever(mqtt_client)\n\n\nmain()\n","sub_path":"projects/petersa/ev3_petersa_final_project.py","file_name":"ev3_petersa_final_project.py","file_ext":"py","file_size_in_byte":5228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"217212282","text":"import cv2\nimport numpy as np\n\nimg = cv2.imread('../../Assets/Images/lena.jpg', 1)\n\ncv2.imshow(\"src\",img)\nimgInfo = img.shape\n\nheight = imgInfo[0]\nwidth = imgInfo[1]\n\n\nmatShift = np.float32([[0.5, 0, 0], [0, 0.5, 0]])  # 2x3 affine matrix\n\ndst = cv2.warpAffine(img, matShift, (int(width/2), int(height/2)))  # p1: source image; p2: affine matrix; p3: output size as (width, height)\n\ncv2.imshow('dst', dst)\n\ncv2.waitKey(0)","sub_path":"python/image_scale.py","file_name":"image_scale.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"599965751","text":"\n\nfrom xai.brain.wordbase.verbs._suffice import _SUFFICE\n\n# class header\nclass _SUFFICED(_SUFFICE, ):\n\tdef __init__(self,): \n\t\t_SUFFICE.__init__(self)\n\t\tself.name = \"SUFFICED\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"suffice\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_sufficed.py","file_name":"_sufficed.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"153047508","text":"class Solution(object):\n    def twoSum(self, nums, target):\n        \"\"\"\n        :type nums: List[int]\n        :type target: int\n        :rtype: List[int]\n        \"\"\"\n        for i in range(0, len(nums) - 1):\n            for j in range(i + 1, len(nums)):\n                if nums[i] + nums[j] == target:\n                    return [i, j]\n\n\nif __name__ == '__main__':\n    print(Solution().twoSum(nums=[3,2,4],target=6))\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"461275073","text":"#!/usr/bin/env python3\n#\n# (c) 2019 Yoichi Tanibayashi\n#\n\"\"\"\nTcpCmdServer -- base class for server programs\n\nReceives a request (command string) from a client and\nexecutes the corresponding command (function).\n\n* command string: a command name and parameters, space separated\n\n  \"command_name param1 param2 ..\"\n\n* The command and its parameters are split into a list before\n  being passed to the function.\n\n* reply: a JSON string\n\n  '{\"rc\": Cmd.RC_*, \"msg\": arbitrary message}'\n\n------------\nFor each command you can register\na function that CmdServerHandler executes immediately (FUNC_I) and\na function that is queued and executed in order by CmdServerApp (FUNC_Q).\n\nFUNC_I: requests from multiple clients run in parallel (multi-threaded).\nFUNC_Q: never run in parallel; always executed one at a time (single-threaded).\n\n\nObjects\n------------\nmain()\n |\n +- CmdServerApp\n    |\n    +- Cmd\n    |\n    +- CmdServer\n       |\n       +- CmdServerHandler\n\nThreads\n--------\nmain() - CmdServerApp:main() - Cmd.main()\n |\n +- CmdServer.serve_forever()\n |    |\n |    +- CmdServerHandler.handle()\n |\n +- CmdServerApp.cmd_worker()\n\n\"\"\"\n__author__ = 'Yoichi Tanibayashi'\n__date__ = '2019'\n\n\nimport socketserver\nimport socket\nimport threading\nimport queue\nimport json\nimport time\n\nfrom MyLogger import get_logger\n\n\nclass Cmd:\n    \"\"\"\n    Override __init__():\n    register commands with self.add_cmd(),\n    then call super().__init__() at the end.\n\n    Define a function for each command:\n    def cmd_..(self, args):\n        :\n        return rc, msg\n\n    For anything other than command execution, override main() and end();\n    use self._active as the flag.\n\n    \"\"\"\n    DEF_PORT = 59001\n\n    RC_OK = 'OK'      # OK .. 
no queuing needed for FUNC_I\n    RC_NG = 'NG'      # NG\n    RC_CONT = 'CONTINUE'  # FUNC_I finished normally .. queue it and wait for the result\n    RC_ACCEPT = 'ACCEPT'  # FUNC_I finished normally .. queue it without waiting for the result\n\n    FUNC_I = 'func_i'\n    FUNC_Q = 'func_q'\n    HELP_STR = 'help'\n\n    CMD_HELP = 'help'\n    CMD_EXIT = 'exit'\n    CMD_SHUTDOWN = 'shutdown9999'\n\n    def __init__(self, init_param=None, port=DEF_PORT, debug=False):\n        self._dbg = debug\n        self._log = get_logger(__class__.__name__, self._dbg)\n        self._log.debug('init_param=%s, port=%s', init_param, port)\n\n        if port is None:\n            self._port = self.DEF_PORT\n        else:\n            self._port = port\n        self._log.debug('_port=%d', self._port)\n\n        self._active = True  # used as the exit condition of main()\n\n        self.add_cmd('sleep', self.cmd_i_sleep, self.cmd_q_sleep, 'sleep')\n        self.add_cmd(self.CMD_HELP, self.cmd_i_help, None, 'command help')\n        self.add_cmd(self.CMD_EXIT, self.cmd_i_exit, None, 'disconnect')\n        self.add_cmd(self.CMD_SHUTDOWN,\n                     self.cmd_i_shutdown, self.cmd_q_shutdown,\n                     'shutdown server')\n\n    def main(self):\n        \"\"\"\n        override\n        \"\"\"\n        self._log.debug('')\n\n        while self._active:\n            time.sleep(1)\n\n        self._active = False\n        self._log.debug('done')\n\n    def end(self):\n        self._log.debug('')\n        self._log.debug('done')\n\n    def start(self):\n        self._log.debug('')\n        self._active = True\n        self._log.debug('done')\n\n    def stop_main(self):\n        \"\"\"\n        override:\n        Stop the processing inside main().\n        The cleanup that follows (releasing resources, stopping\n        sub-threads, etc.) is done in end().\n        \"\"\"\n        self._log.debug('')\n        self._active = False\n        self._log.debug('done')\n\n    def add_cmd(self, name, func_i, func_q, help_str):\n        self._log.debug('name=%a, func_i=%a, func_q=%a, help_str=%a',\n                        name, func_i, func_q, help_str)\n\n        try:\n            self._cmd\n        except AttributeError:\n            self._log.debug('create: self._cmd')\n            self._cmd = {}\n\n        self._cmd[name] = {\n            self.FUNC_I: func_i,\n            self.FUNC_Q: func_q,\n            self.HELP_STR: help_str\n        }\n\n    def cmd_i_help(self, args):\n        \"\"\"\n        List the available commands.\n        \"\"\"\n        self._log.debug('args=%a', args)\n\n        if len(args) >= 2:\n            if args[1] in self._cmd:\n                msg = self._cmd[args[1]]['help']\n                rc = self.RC_OK\n                return rc, msg\n            else:\n                msg = '%s: no such command' % args[1]\n                rc = self.RC_NG\n                return rc, msg\n\n        # command list\n        msg = []\n        for c in self._cmd:\n            msg.append([c, self._cmd[c]['help']])\n\n        rc = self.RC_OK\n        return rc, msg\n\n    def cmd_i_sleep(self, args):\n        \"\"\"\n        Put the server to sleep.\n        The client is kept waiting as well.\n\n        Here we only pre-check the argument.\n        \"\"\"\n        self._log.debug('args=%a', args)\n\n        try:\n            sleep_sec = float(args[1])\n        except Exception as e:\n            rc = self.RC_NG\n            msg = '%s: %s: %s' % (args[0], type(e), e)\n        else:\n            rc = self.RC_CONT\n            msg = 'sleep_sec=%s' % sleep_sec\n        self._log.debug(msg)\n        return rc, msg\n\n    def cmd_q_sleep(self, args):\n        \"\"\"\n        Put the server to sleep.\n        The client is kept waiting as well.\n\n        Receives the pre-checked parameter (seconds) and\n        actually sleeps.\n        \"\"\"\n        self._log.debug('args=%a', args)\n\n        rc = self.RC_OK\n\n        sleep_sec = float(args[1])\n        msg = '%s: sleep_sec=%s' % (args[0], sleep_sec)\n\n        time.sleep(sleep_sec)\n        self._log.debug('sleep:done')\n        return rc, msg\n\n    def cmd_i_exit(self, args):\n        \"\"\"\n        Disconnect from the client.\n        \"\"\"\n        self._log.debug('args=%a', args)\n        return self.RC_OK, None\n\n    def cmd_i_shutdown(self, args):\n        \"\"\"\n        Shut down the server process after the given number of seconds.\n        The client completes without waiting.\n\n        Here we pre-check the parameter and\n        accept (ACCEPT) the request.\n\n        \"\"\"\n        self._log.debug('args=%a', args)\n\n        if len(args) == 1:\n            return self.RC_ACCEPT, 'sleep_sec=0'\n\n        try:\n            sleep_sec = float(args[1])\n        except Exception as e:\n            rc = self.RC_NG\n            msg = '%s: %s: %s' % (args[0], type(e), e)\n        else:\n            rc = self.RC_ACCEPT\n            msg = 'sleep_sec=%s' % sleep_sec\n        self._log.debug(msg)\n        return rc, msg\n\n    
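# Editor's note (illustrative sketch, not part of the original file): a\n    # minimal client exchange for this protocol, assuming the server listens\n    # on localhost at DEF_PORT, could look like:\n    #\n    #   import socket, json\n    #   sock = socket.create_connection(('localhost', 59001))\n    #   sock.sendall(b'sleep 1.5')\n    #   rep = json.loads(sock.recv(512).rstrip(b'\\r\\n\\x04'))\n    #   # -> {'rc': 'OK', 'msg': 'sleep: sleep_sec=1.5'}\n\n    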
def cmd_q_shutdown(self, args):\n        \"\"\"\n        Shut down the server process after the given number of seconds.\n        The client completes without waiting.\n\n        This just sleeps for the given seconds and returns OK.\n\n        The main routine (CmdServerApp:main) checks the command\n        name and performs the actual shutdown.\n        \"\"\"\n        self._log.debug('args=%a', args)\n\n        rc = self.RC_OK\n\n        if len(args) == 1:\n            sleep_sec = 0\n        else:\n            sleep_sec = float(args[1])\n\n        msg = '%s: sleep_sec=%s' % (args[0], sleep_sec)\n        self._log.debug(msg)\n\n        time.sleep(sleep_sec)\n        self._log.debug('sleep:done')\n\n        return rc, msg\n\n\nclass CmdServerHandler(socketserver.StreamRequestHandler):\n    \"\"\"\n    No need to override.\n    \"\"\"\n    DEF_HANDLE_TIMEOUT = 3  # sec\n\n    EOF = '\\x04'\n\n    def __init__(self, req, c_addr, svr):\n        self._dbg = svr._dbg\n        self._log = get_logger(__class__.__name__, self._dbg)\n        self._log.debug('c_addr=%s', c_addr)\n\n        self._svr = svr\n\n        self._active = False\n        self._myq = queue.SimpleQueue()\n\n        # the attribute name is fixed: timeout for self.request.recv()\n        self.timeout = self.DEF_HANDLE_TIMEOUT\n        self._log.debug('timeout=%s sec', self.timeout)\n\n        return super().__init__(req, c_addr, svr)\n\n    def setup(self):\n        self._log.debug('_active=%s', self._active)\n        self._active = True\n        self._log.debug('_active=%s', self._active)\n        return super().setup()\n\n    def finish(self):\n        self._log.debug('_active=%s', self._active)\n        self._active = False\n        self._log.debug('_active=%s', self._active)\n        return super().finish()\n\n    def set_timeout(self, timeout=DEF_HANDLE_TIMEOUT):\n        self._log.debug('timeout=%s', timeout)\n        self.timeout = timeout\n\n    def net_write(self, msg, enc='utf-8'):\n        self._log.debug('msg=%a, enc=%s', msg, enc)\n\n        if enc != '':\n            msg = msg.encode(enc)\n            self._log.debug('msg=%a', msg)\n\n        try:\n            self.wfile.write(msg)\n        except Exception as e:\n            self._log.warning('%s:%s.', type(e), e)\n\n    def send_reply(self, rc, msg=None, cont=False):\n        self._log.debug('rc=%a, msg=%a, cont=%s', rc, msg, cont)\n\n        if msg is None:\n            rep = {'rc': rc}\n        else:\n            rep = {'rc': rc, 'msg': msg}\n        rep_str = json.dumps(rep)\n        rep_str += '\\r\\n'\n        if not cont:\n            rep_str += self.EOF\n        self._log.debug('rep_str=%a', rep_str)\n        self.net_write(rep_str)\n\n    def handle(self):\n        self._log.debug('')\n\n        while self._active:\n            self._log.debug('wait net_data')\n            try:\n                # in_data = self.rfile.readline().strip()\n                #   ↓\n                #   with rfile, once it times out, it can\n                #   never be read again!?\n                #   ↓\n                in_data = self.request.recv(512).strip()\n\n            except socket.timeout as e:\n                self._log.debug('%s:%s.', type(e), e)\n                self._log.debug('_svr._active=%s', self._svr._active)\n                if self._svr._active:\n                    # if the server is still alive, keep going\n                    continue\n                else:\n                    self.send_reply(Cmd.RC_NG, 'server is dead !')\n                    break\n            except Exception as e:\n                self._log.warning('%s:%s.', type(e), e)\n                msg = 'error %s:%s' % (type(e), e)\n                self.send_reply(Cmd.RC_NG, msg)\n                break\n            else:\n                self._log.debug('in_data=%a', in_data)\n\n            if len(in_data) == 0 or in_data == b'\\x04':\n                self._log.debug('disconnected')\n                break\n\n            # decode\n            try:\n                decoded_data = in_data.decode('utf-8')\n            except UnicodeDecodeError as e:\n                msg = '%s:%s .. ignored' % (type(e), e)\n                self._log.error(msg)\n                self.send_reply(Cmd.RC_NG, msg)\n                break\n            else:\n                self._log.debug('decoded_data=%a', decoded_data)\n\n            # get args\n            args = decoded_data.split()\n            self._log.debug('args=%s', args)\n            if len(args) == 0:\n                msg = 'no command'\n                self._log.warning(msg)\n                self.send_reply(Cmd.RC_NG, msg)\n                break\n\n            # check command\n            if args[0] not in self._svr._app._cmd._cmd:\n                msg = '%s: no such command .. 
ignored' % args[0]\n                self._log.error(msg)\n                self.send_reply(Cmd.RC_NG, msg)\n                continue\n\n            if self._svr._app._cmd._cmd[args[0]][Cmd.FUNC_I] is not None:\n                #\n                # interactive command\n                #\n                self._log.info('call %s: %a', Cmd.FUNC_I, args)\n                rc, msg = self._svr._app._cmd._cmd[args[0]][Cmd.FUNC_I](args)\n                self._log.info('rc=%s, msg=%s', rc, msg)\n\n                if args[0] == Cmd.CMD_EXIT:\n                    self._active = False\n                    self._log.debug('_active=%s', self._active)\n\n                if rc != Cmd.RC_CONT and rc != Cmd.RC_ACCEPT:\n                    self.send_reply(rc, msg)\n                    continue\n\n                if rc == Cmd.RC_ACCEPT:\n                    self._myq = None\n\n            # check FUNC_Q\n            if self._svr._app._cmd._cmd[args[0]][Cmd.FUNC_Q] is None:\n                msg2 = '%s: %s is None .. ignored' % (args[0], Cmd.FUNC_Q)\n                self._log.warning(msg2)\n                if msg is None:\n                    self.send_reply(Cmd.RC_OK, msg2)\n                else:\n                    self.send_reply(Cmd.RC_OK, msg)\n                continue\n\n            #\n            # queuing\n            #\n\n            # check queue size\n            qsize = self._svr._app._cmdq.qsize()\n            if qsize > 100:\n                msg = 'qsize=%d: server busy' % qsize\n                self._log.warning(msg)\n                self.send_reply(Cmd.RC_NG, msg)\n                continue\n\n            # put args to queue\n            try:\n                self._svr._app._cmdq.put((args, self._myq), block=False)\n            except Exception as e:\n                msg = '%s:%s' % (type(e), e)\n                self._log.error(msg)\n                self.send_reply(Cmd.RC_NG, msg)\n                continue\n\n            # if _myq is None (RC_ACCEPT), send reply now\n            if self._myq is None:\n                self._log.debug('reply queue is None .. send reply')\n                self.send_reply(Cmd.RC_OK, msg)\n                continue\n\n            # wait result from _myq\n            self._log.debug('wait result')\n            rc, msg = self._myq.get()\n            if rc == Cmd.RC_OK:\n                self._log.debug('rc=%s, msg=%s', rc, msg)\n            else:\n                self._log.error('rc=%s, msg=%s', rc, msg)\n\n            # send reply\n            self.send_reply(rc, msg)\n\n        self._log.debug('done')\n\n\nclass CmdServer(socketserver.ThreadingTCPServer):\n    \"\"\"\n    No need to override.\n    \"\"\"\n    def __init__(self, app, port, debug=False):\n        self._dbg = debug\n        self._log = get_logger(__class__.__name__, self._dbg)\n        self._log.debug('port=%s', port)\n\n        self._app = app\n        self._port = port\n\n        self._active = False\n        self.allow_reuse_address = True  # Important !!\n\n        while not self._active:\n            try:\n                super().__init__(('', self._port), CmdServerHandler)\n                self._active = True\n                self._log.info('_active=%s,_port=%s',\n                               self._active, self._port)\n            except PermissionError as e:\n                self._log.error('%s:%s.', type(e), e)\n                raise\n            except OSError as e:\n                self._log.error('%s:%s .. 
retry', type(e), e)\n                time.sleep(5)\n            except Exception as e:\n                self._log.error('%s:%s.', type(e), e)\n                raise\n\n        self._log.debug('done')\n\n    def serve_forever(self):\n        self._log.debug('start')\n        super().serve_forever()\n        self._log.debug('done')\n\n    \"\"\"\n    def service_actions(self):\n        self._log.debug('')\n        super().service_actions()\n        self._log.debug('done')\n    \"\"\"\n\n    def end(self):\n        self._log.debug('')\n        self.shutdown()  # make serve_forever() return\n        self._active = False  # make handle() finish\n        self._log.debug('done')\n\n\nclass CmdServerApp:\n    \"\"\"\n    \"\"\"\n    def __init__(self, cmd_class, init_param=None, port=Cmd.DEF_PORT,\n                 debug=False):\n        self._dbg = debug\n        self._log = get_logger(__class__.__name__, self._dbg)\n        self._log.debug('cmd_class=%s, init_param=%s, port=%s',\n                        cmd_class, init_param, port)\n\n        self._cmdq = queue.Queue()\n\n        self._cmd = cmd_class(init_param, port, debug=self._dbg)\n        self._svr = CmdServer(self, self._cmd._port, self._dbg)\n        self._svr_th = threading.Thread(target=self._svr.serve_forever,\n                                        daemon=True)\n        self._cmd_worker_th = threading.Thread(target=self.cmd_worker,\n                                               daemon=True)\n\n    def cmd_worker(self):\n        self._log.debug('')\n\n        loop = True\n\n        while loop:\n            args, repq = self._cmdq.get()\n            self._log.info('args=%a', args)\n\n            # check and call cmd\n            if args[0] in self._cmd._cmd:\n                if self._cmd._cmd[args[0]][Cmd.FUNC_Q] is not None:\n\n                    # call cmd\n                    self._log.debug('call %s: %a', Cmd.FUNC_Q, args)\n                    rc, msg = self._cmd._cmd[args[0]][Cmd.FUNC_Q](args)\n\n                    if rc == Cmd.RC_OK:\n                        self._log.info('rc=%a, msg=%a', rc, msg)\n                    else:\n                        self._log.error('rc=%a, msg=%a', rc, msg)\n                else:\n                    rc = Cmd.RC_NG\n                    msg = '%s: no such %s .. ignored' % (args[0], Cmd.FUNC_Q)\n                    self._log.error(msg)\n            else:\n                rc = Cmd.RC_NG\n                msg = '%s: no such command .. 
ignored' % args[0]\n self._log.error(msg)\n\n if repq is not None:\n self._log.debug('put reply')\n repq.put((rc, msg))\n\n # shutdown check\n if args[0] == Cmd.CMD_SHUTDOWN:\n self._log.info('shutdown !!')\n time.sleep(1)\n break\n\n time.sleep(0.1)\n\n self._cmd.stop_main()\n self._log.debug('done')\n\n def main(self):\n self._svr_th.start()\n self._cmd_worker_th.start()\n\n self._cmd.main()\n\n self._log.debug('done')\n\n def end(self):\n self._log.debug('')\n while not self._cmdq.empty():\n args, repq = self._cmdq.get()\n self._log.debug('args=%s, repq=%s', args, repq)\n if repq is not None:\n repq.put((Cmd.RC_NG, 'terminated'))\n self._svr.end()\n self._cmd.end()\n self._log.debug('done')\n\n\nimport click\nCONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])\n\n\n@click.command(context_settings=CONTEXT_SETTINGS,\n help='TCP Server base class')\n@click.option('--port', 'port', type=int,\n help='port number')\n@click.option('--debug', '-d', 'debug', is_flag=True, default=False,\n help='debug flag')\ndef main(port, debug):\n logger = get_logger(__name__, debug)\n logger.debug('port=%s', port)\n\n logger.info('start')\n\n app = CmdServerApp(Cmd, init_param=None, port=port, debug=debug)\n try:\n app.main()\n finally:\n logger.debug('finally')\n app.end()\n logger.info('end')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"TcpCmdServer.py","file_name":"TcpCmdServer.py","file_ext":"py","file_size_in_byte":19098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"146358262","text":"import numpy as np\nfrom position import Location\nfrom astropy.modeling import fitting, models, Parameter, Fittable1DModel\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\n\n# Assymetric guassian function\nclass AGaussian1D(Fittable1DModel):\n inputs = ('x',)\n outputs = ('y',)\n\n amplitude = Parameter()\n x0 = Parameter()\n l_sig = Parameter()\n r_sig = Parameter()\n\n @staticmethod\n def evaluate(x, amplitude, x0, l_sig, r_sig):\n sig = np.where(x < x0, l_sig, r_sig)\n f = models.Gaussian1D(amplitude, x0, sig)\n return f(x)\n\n\ndef hist_fit_single(rmid):\n hist_file = Location.project_loca + \"result/light_curve/\" + str(rmid) + \\\n \"/cont-hbeta.txt\"\n fit_file = Location.project_loca + \"result/light_curve/\" + str(rmid) + \\\n \"/javelin_lag.txt\"\n fit_img = Location.project_loca + \"result/light_curve/\" + str(rmid) + \\\n \"/lag_fit.eps\"\n hist_data = np.loadtxt(hist_file)\n lag = hist_data[:, 2]\n hist, x = np.histogram(lag, 500)\n fitter = fitting.LevMarLSQFitter()\n func = AGaussian1D(max(hist), x[np.argmax(hist)], 1.0, 1.0)\n fit = fitter(func, x[0: 500], hist, maxiter=10000)\n np.savetxt(fit_file, fit.parameters)\n fig = plt.figure()\n plt.hist(lag, 500)\n plt.plot(x, fit(x))\n fig.savefig(fit_img, format='eps')\n plt.close()\n\n\ndef hist_fit(rmid):\n print(\"Begining time lag hist fit for \" + str(rmid))\n try:\n hist_fit_single(rmid)\n print(\"Finished\")\n except Exception as reason:\n print(str(\"Failed: \") + str(reason))\n","sub_path":"hist_fit.py","file_name":"hist_fit.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"486634531","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport datetime\nimport os\nimport locale\nfrom banking.util import parse_amount, extract_transactions, lxmlreadhtml\n\nlocale.setlocale(locale.LC_ALL, 'sv_SE.UTF-8')\n\n\ndef parse_amexstatement(amexfile):\n 
\"\"\"extracts statement object from an AmericanExpress .html statement\"\"\"\n html = lxmlreadhtml(amexfile)\n accnr = os.path.basename(amexfile).split('_')[-2]\n selectors = {\n 'table#table-txnsCard0': 'tr.tableStandardText',\n 'table#table-allTxns': 'tr.tableStandardText',\n 'div.transactions-list': 'div'\n }\n table = row_select = None\n for table_select, row_select in selectors.items():\n table = html.cssselect(table_select)\n if table:\n break\n\n raw_datarows = table[0].cssselect(row_select)\n transactions = extract_transactions(raw_datarows, _parse_amex_row)\n\n saldo = html.cssselect('td#colOSBalance div')\n if saldo:\n saldo = -1 * parse_amount(html.cssselect('td#colOSBalance div')[2].text)\n else:\n saldo = -1.\n\n options = html.cssselect('select#viewPeriod option')\n if options:\n selected_option_nr = int(html.cssselect('select#viewPeriod')[0].value)\n selected_option_value = options[selected_option_nr].text.strip()\n fromdate, todate = _parse_amex_dates(selected_option_value)\n else:\n fromdate = todate = datetime.datetime.now()\n\n return {\n 'filename': amexfile,\n 'fromdate': fromdate,\n 'todate': todate,\n 'saldo': saldo,\n 'account': accnr,\n 'transactions': transactions\n }\n\n\ndef _parse_amex_dates(daterangetext):\n fromdate, todate = daterangetext.split(' till ')\n # https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior\n # 'okt 15, 2014'\n df = \"%b %d, %Y\"\n fromdate = datetime.datetime.strptime(fromdate, df).date()\n try:\n todate = datetime.datetime.strptime(todate, df).date()\n except ValueError:\n todate = fromdate + datetime.timedelta(days=30)\n return fromdate, todate\n\n\ndef _parse_amex_row_old(tr):\n transaction = tr.findall('td')\n # https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior\n # '23 okt 2014'\n df = \"%d %b %Y\"\n date = datetime.datetime.strptime(transaction[0].text_content().strip(), df)\n description = _parse_amex_description(transaction[1])\n if transaction[2].text.strip():\n amount = parse_amount(transaction[2].text.strip())\n else:\n amount = -1 * parse_amount(transaction[3].text.strip())\n return {\n 'tdate': date.date(),\n 'full_description': description,\n 'description': description.splitlines()[0],\n 'amount': amount,\n 'runningsaldo': None, # no runningsaldo in amex statements, hence 'None'\n }\n\n\ndef _parse_amex_row(row):\n transaction = row.findall('div')\n if not transaction:\n transaction = row.findall('td')\n if transaction is None or len(transaction) != 4:\n return\n datestr = transaction[0].text_content().strip()\n # https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior\n if len(datestr.split(' ')) == 3:\n # '23 okt 2014'\n date = datetime.datetime.strptime(datestr, \"%d %b %Y\")\n else:\n # '23 okt'\n date = datetime.datetime.strptime(datestr, \"%d %b\")\n date = date.replace(year=2017)\n description = _parse_amex_description(transaction[1])\n if transaction[2].text_content().strip():\n amount = parse_amount(transaction[2].text_content().strip())\n else:\n amount = -1 * parse_amount(transaction[3].text_content().strip())\n return {\n 'tdate': date,\n 'full_description': description,\n 'description': description.splitlines()[0],\n 'amount': amount,\n 'runningsaldo': None, # no runningsaldo in amex statements, hence 'None'\n }\n\n\ndef _parse_amex_description(td):\n description = [e.strip() for e in td.itertext() if e.strip()]\n d = []\n for line in description:\n line = \" \".join([w for w in line.split() if w])\n d.append(line)\n return 
\"\\n\".join(d)\n\n\ndef get_balance(htmlobjtree):\n recent_activity_elem = htmlobjtree.cssselect('div.summary-title div.data-value')\n recent_activity_str = recent_activity_elem[0].text_content().strip()\n recent_activity_value = parse_amount(recent_activity_str)\n return recent_activity_value\n","sub_path":"banking/institutes/americanexpress/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":4424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"20493199","text":"from os import listdir\nfrom string import Template\n\n\ndataset_titles = list(filter(lambda x: x[0] != \".\", listdir(\"projects\") + listdir(\"investigators\")))\n\ntemplate = Template(\"\"\"from functions import examine\n\n\ndef test_$title_no_hyphen():\n assert examine('$title') == 'All good'\n\"\"\")\n\nfor title in dataset_titles:\n with open(\"tests/test_\" + title + \".py\", \"w\") as f:\n\n f.write(template.substitute(title=title, title_no_hyphen=title.replace(\"-\", \"_\")))\n","sub_path":"tests/create_tests.py","file_name":"create_tests.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"37673582","text":"# -*- encoding: utf-8 -*-\n\nimport cv2 as cv\nimport numpy as np\nfrom . import utils\n\nclass Filter :\n\n def __init__(self) :\n \"\"\"Method to override to add parameters to the filter.\n \"\"\"\n pass\n\n def apply(self, src, dst) :\n \"\"\"Method to override to apply the filter.\n\n Pseudocode :\n b, g, r = cv.split(src)\n filter b, g, r composants\n cv.merge((b, g, r), dst)\n \"\"\"\n pass\n\n\nclass RecolorRC(Filter) :\n\n def apply(self, src, dst) :\n \"\"\"Simulate conversion from BGR to RC (red, cyan).\n\n The source and destination images must both be in BGR format.\n Blues and greens are replaced with cyans.\n\n Pseudocode:\n dst.b = dst.g = 0.5 * (src.b + src.g)\n dst.r = src.r\n \"\"\"\n b, g, r = cv.split(src)\n cv.addWeighted(b, 0.5, g, 0.5, 0, b)\n cv.merge((b, b, r), dst)\n\n\nclass RecolorRGV(Filter) :\n\n def apply(self, src, dst) :\n \"\"\"Simulate conversion from BGR to RGV (red, green, value).\n\n The source and destination images must both be in BGR format.\n Blues are desaturated.\n\n Pseudocode:\n dst.b = min(src.b, src.g, src.r)\n dst.g = src.g\n dst.r = src.r\n \"\"\"\n b, g, r = cv.split(src)\n cv.min(b, g, b)\n cv.min(b, r, b)\n cv.merge((b, g, r), dst)\n\nclass RecolorCMV(Filter) :\n\n def apply(self, src, dst) :\n \"\"\"Simulate conversion from BGR to CMV (cyan, magenta, value).\n\n The source and destination images must both be in BGR format.\n Yellows are desaturated.\n\n Pseudocode:\n dst.b = max(src.b, src.g, src.r)\n dst.g = src.g\n dst.r = src.r\n \"\"\"\n b, g, r = cv.split(src)\n cv.max(b, g, b)\n cv.max(b, r, b)\n cv.merge((b, g, r), dst)\n\n\nclass VFuncFilter(Filter) :\n \"\"\"A filter that applies a function to V (or all of BGR).\"\"\"\n \n def __init__(self, vFunc = None, dtype = np.uint8) :\n length = np.iinfo(dtype).max + 1\n self._vLookupArray = utils.createLookupArray(vFunc, length)\n\n def apply(self, src, dst):\n \"\"\"Apply the filter with a BGR or gray source/destination.\"\"\"\n\n srcFlatView = utils.flatView(src)\n dstFlatView = utils.flatView(dst)\n utils.applyLookupArray(\n self._vLookupArray,\n srcFlatView,\n dstFlatView\n )\n\n\nclass VCurveFilter(VFuncFilter):\n \"\"\"A filter that applies a curve to V (or all of BGR).\"\"\"\n\n def __init__(self, vPoints, dtype = np.uint8) :\n 
super().__init__(\n utils.createCurveFunc(vPoints),\n dtype\n )\n\n\nclass BGRFuncFilter(Filter) :\n \"\"\"A filter that applies different functions to each of BGR.\"\"\"\n\n def __init__(self, vFunc = None, bFunc = None, gFunc = None,\n rFunc = None, dtype = np.uint8) :\n \n length = np.iinfo(dtype).max + 1\n self._bLookupArray = utils.createLookupArray(\n utils.createCompositeFunc(bFunc, vFunc),\n length\n )\n self._gLookupArray = utils.createLookupArray(\n utils.createCompositeFunc(gFunc, vFunc),\n length\n )\n self._rLookupArray = utils.createLookupArray(\n utils.createCompositeFunc(rFunc, vFunc),\n length\n )\n\n def apply(self, src, dst):\n \"\"\"Apply the filter with a BGR source/destination.\"\"\"\n \n b, g, r = cv.split(src)\n utils.applyLookupArray(self._bLookupArray, b, b)\n utils.applyLookupArray(self._gLookupArray, g, g)\n utils.applyLookupArray(self._rLookupArray, r, r)\n cv.merge([b, g, r], dst)\n\n\nclass BGRCurveFilter(BGRFuncFilter):\n \"\"\"A filter that applies different curves to each of BGR.\"\"\"\n\n def __init__(self, vPoints = None, bPoints = None,\n gPoints = None, rPoints = None, dtype = np.uint8):\n super().__init__(\n utils.createCurveFunc(vPoints),\n utils.createCurveFunc(bPoints),\n utils.createCurveFunc(gPoints),\n utils.createCurveFunc(rPoints),\n dtype\n )\n\n\nclass BGRPortraCurveFilter(BGRCurveFilter):\n \"\"\"A filter that applies Portra-like curves to BGR.\"\"\"\n\n def __init__(self, dtype = np.uint8):\n super().__init__(\n vPoints = [(0,0),(23,20),(157,173),(255,255)],\n bPoints = [(0,0),(41,46),(231,228),(255,255)],\n gPoints = [(0,0),(52,47),(189,196),(255,255)],\n rPoints = [(0,0),(69,69),(213,218),(255,255)],\n dtype = dtype\n )\n\n\nclass BGRProviaCurveFilter(BGRCurveFilter):\n \"\"\"A filter that applies Provia-like curves to BGR.\"\"\"\n\n def __init__(self, dtype = np.uint8):\n super().__init__(\n bPoints = [(0,0),(35,25),(205,227),(255,255)],\n gPoints = [(0,0),(27,21),(196,207),(255,255)],\n rPoints = [(0,0),(59,54),(202,210),(255,255)],\n dtype = dtype\n )\n\n\nclass BGRVelviaCurveFilter(BGRCurveFilter):\n \"\"\"A filter that applies Velvia-like curves to BGR.\"\"\"\n\n def __init__(self, dtype = np.uint8):\n super().__init__(\n vPoints = [(0,0),(128,118),(221,215),(255,255)],\n bPoints = [(0,0),(25,21),(122,153),(165,206),(255,255)],\n gPoints = [(0,0),(25,21),(95,102),(181,208),(255,255)],\n rPoints = [(0,0),(41,28),(183,209),(255,255)],\n dtype = dtype\n )\n\n\nclass BGRCrossProcessCurveFilter(BGRCurveFilter):\n \"\"\"A filter that applies cross-process-like curves to BGR.\"\"\"\n\n def __init__(self, dtype = np.uint8):\n super().__init__(\n bPoints = [(0,20),(255,235)],\n gPoints = [(0,0),(56,39),(208,226),(255,255)],\n rPoints = [(0,0),(56,22),(211,255),(255,255)],\n dtype = dtype\n )\n\nclass StrokeEdgesFilter(Filter) :\n\n def __init__(self, blurKsize=7, edgeKsize=5) :\n\n self.blurKsize = blurKsize\n self.edgeKsize = edgeKsize\n\n def apply(self, src, dst):\n\n if self.blurKsize >= 3:\n blurredSrc = cv.medianBlur(src, self.blurKsize)\n graySrc = cv.cvtColor(blurredSrc, cv.COLOR_BGR2GRAY)\n else:\n graySrc = cv.cvtColor(src, cv.COLOR_BGR2GRAY)\n\n cv.Laplacian(graySrc, cv.CV_8U, graySrc, ksize=self.edgeKsize)\n normalizedInverseAlpha = (1.0 / 255) * (255 - graySrc)\n channels = cv.split(src)\n\n for channel in channels:\n channel[:] = channel * normalizedInverseAlpha\n\n cv.merge(channels, dst)\n\n\nclass CannyEdgesFilter(Filter) :\n\n def __init__(self, threshold=10, apertureSize=3, overlay=False) :\n self._threshold = threshold\n 
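# Note added for clarity: apply() passes threshold and threshold * 3 to\n        # cv.Canny as the hysteresis bounds, and apertureSize to its Sobel step.\n        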
self._apertureSize = apertureSize\n        self._overlay = overlay\n\n    def apply(self, src, dst) :\n\n        assert src.shape == dst.shape\n\n        if utils.isGray(src) :\n            gray = src\n        else :\n            gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)\n\n        cedge = cv.Canny(\n            gray,\n            self._threshold,\n            self._threshold * 3,\n            apertureSize=self._apertureSize\n        )\n        alpha = (1.0 / 255) * (255 - cedge)\n\n        channels = cv.split(src)\n\n        for channel in channels :\n            if self._overlay :\n                channel[:] = channel * alpha\n            else :\n                channel[:] = cedge\n\n        cv.merge(channels, dst)\n\n\nclass ThresholdFilter(Filter) :\n\n    def __init__(self, threshold=127, otsu=False) :\n        self._threshold = threshold\n        self._max = 255\n        self._otsu = otsu\n\n    def apply(self, src, dst) :\n\n        if utils.isGray(src) :\n            gray = src\n        else :\n            gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)\n\n        if self._otsu :\n            th, thresh = cv.threshold(\n                gray,\n                0, self._max,\n                cv.THRESH_BINARY + cv.THRESH_OTSU\n            )\n        else :\n            th, thresh = cv.threshold(\n                gray,\n                self._threshold, self._max,\n                cv.THRESH_BINARY\n            )\n\n        cv.merge((thresh, thresh, thresh), dst)\n\nclass AdaptiveGaussianThresholdFilter(Filter) :\n\n    def __init__(self, size=11, c=2) :\n        self._max = 255\n        self._size = size\n        self._c = c\n\n    def apply(self, src, dst) :\n\n        if utils.isGray(src) :\n            gray = src\n        else :\n            gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)\n\n        thresh = cv.adaptiveThreshold(\n            gray,\n            self._max,\n            cv.ADAPTIVE_THRESH_GAUSSIAN_C,\n            cv.THRESH_BINARY,\n            self._size, self._c\n        )\n\n        cv.merge((thresh, thresh, thresh), dst)\n\nclass AdaptiveMeanThresholdFilter(Filter) :\n\n    def __init__(self, size=11, c=2) :\n        self._max = 255\n        self._size = size\n        self._c = c\n\n    def apply(self, src, dst) :\n\n        if utils.isGray(src) :\n            gray = src\n        else :\n            gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)\n\n        thresh = cv.adaptiveThreshold(\n            gray,\n            self._max,\n            cv.ADAPTIVE_THRESH_MEAN_C,\n            cv.THRESH_BINARY,\n            self._size, self._c\n        )\n\n        cv.merge((thresh, thresh, thresh), dst)\n\n\nclass GaussianBlurFilter(Filter) :\n\n    def __init__(self, kernel=(5,5)) :\n        self._kernel = kernel\n\n    def apply(self, src, dst) :\n        cv.GaussianBlur(src, self._kernel, 0, dst)\n\n\nclass VConvolutionFilter(Filter) :\n    \"\"\"A filter that applies a convolution to V (or all of BGR).\"\"\"\n\n    def __init__(self, kernel):\n        super().__init__()\n        self._kernel = kernel\n\n    def apply(self, src, dst):\n        \"\"\"Apply the filter with a BGR or gray source/destination.\"\"\"\n        assert src.shape == dst.shape\n\n        cv.filter2D(src, -1, self._kernel, dst)\n\n\nclass SharpenFilter(VConvolutionFilter):\n    \"\"\"A sharpen filter with a 1-pixel radius.\"\"\"\n\n    def __init__(self):\n        kernel = np.array([\n            [-1, -1, -1],\n            [-1,  9, -1],\n            [-1, -1, -1]\n        ])\n        super().__init__(kernel)\n\n\nclass FindEdgesFilter(VConvolutionFilter):\n    \"\"\"An edge-finding filter with a 1-pixel radius.\"\"\"\n\n    def __init__(self):\n        kernel = np.array([\n            [-1, -1, -1],\n            [-1,  8, -1],\n            [-1, -1, -1]\n        ])\n        super().__init__(kernel)\n\n\nclass BlurFilter(VConvolutionFilter):\n    \"\"\"A blur filter with a 2-pixel radius.\"\"\"\n\n    def __init__(self):\n        kernel = np.array([\n            [0.04, 0.04, 0.04, 0.04, 0.04],\n            [0.04, 0.04, 0.04, 0.04, 0.04],\n            [0.04, 0.04, 0.04, 0.04, 0.04],\n            [0.04, 0.04, 0.04, 0.04, 0.04],\n            [0.04, 0.04, 0.04, 0.04, 0.04]\n        ])\n        super().__init__(kernel)\n\n\nclass EmbossFilter(VConvolutionFilter):\n    \"\"\"An emboss filter with a 1-pixel radius.\"\"\"\n\n    def __init__(self):\n        kernel = np.array([\n            [-2, -1, 0],\n            [-1, 1, 1],\n            [ 0, 1, 2]\n        ])\n        
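# The emboss kernel is antisymmetric about its centre: negative weights\n        # above-left and positive ones below-right turn local gradients into\n        # the light/dark relief effect.\n        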
super().__init__(kernel)\n","sub_path":"processors/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":11061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"87416683","text":"import datetime\nimport logging\nimport os\nimport traceback\n\nimport discord\nfrom discord.ext import commands\n\nimport Token\nimport bd_verification\nimport messages\nfrom data import mongo_setup\nfrom services import data_service as svc, exceptions\n\nmongo_setup.global_init()\n# TODO: Get this to work properly\nlogging.basicConfig(level=logging.INFO)\n\nbot = commands.Bot(command_prefix=';',\n description='The friendly helping dragon! ^w^',\n command_not_found=\"Command not found\",\n max_message=100000,\n case_insensitive=True)\n\n\n@bot.event\nasync def on_ready():\n await bot.change_presence(activity=discord.Game(\n name=\";help for help\",\n start=datetime.datetime.now()\n ))\n print('Logged in as: {}'.format(bot.user.name))\n print('-----------------')\n\n\n@bot.event\nasync def on_member_join(member):\n if svc.should_welcome_in_guild(member.guild.id):\n await member.send(svc.get_welcome_message(member.guild.id))\n if svc.should_show_joining_in_guild(member.guild.id):\n lc = svc.get_log_channel_in_guild(member.guild)\n if lc:\n await lc.send(embed=messages.create_member_joined_embed(member))\n\n\n@bot.event\nasync def on_member_remove(member):\n if svc.should_show_leaving(member.guild.id):\n lc = svc.get_log_channel_in_guild(member.guild)\n if lc:\n await lc.send(embed=messages.create_member_left_embed(member))\n\n\n@bot.event\nasync def on_guild_join(guild: discord.Guild):\n svc.create_default_guild(guild)\n await guild.system_channel.send(messages.ON_GUILD_JOIN_MESSAGE)\n\n\n@bot.event\nasync def on_error(event, *args, **kwargs):\n await bot.get_user(152543367937392640).send(\"```Error in {}\\n\\n{}```\".format(event,\n traceback.format_exc()))\n\n\n@bot.event\nasync def on_command_error(ctx, error):\n svc.increment_retarded_user(ctx.guild.id)\n\n\nasync def create_invite_with_exc_msg(e, channel):\n il = await channel.create_invite(max_uses=1, max_age=86400, unique=True, reason=\"Needed to reinvite user\")\n embed = discord.Embed(color=0xff0000)\n embed.add_field(name=e, value=il)\n return embed\n\n\n@bot.event\nasync def on_message(message):\n if type(message.channel) != discord.DMChannel and type(message.channel) != discord.GroupChannel:\n if svc.should_save_messages_in_guild(message.guild.id) and not message.author.bot:\n if message.attachments:\n await messages.save_attachments(message)\n message.content = \"[Has attachment]\" + message.content + \"\\n\\tAttachments:\\n\\t\\t\"\n for a in message.attachments:\n message.content += str(a.url.split(\"/\")[-1]) + \" [ID: {}]\\n\\t\\t\".format(a.id)\n svc.increment_message_saved(message.guild.id)\n with open(Token.get_log_path(message), 'a+', encoding=\"utf-8\") as file:\n file.seek(0, os.SEEK_END)\n if not file.tell():\n file.write(messages.USER_FILE_INFO.format(message.author))\n file.seek(0)\n file.write(messages.USER_NEW_MESSAGE_TO_LOG.format(message))\n await bot.process_commands(message)\n\n\n@bot.event\nasync def on_message_delete(message: discord.Message):\n if type(message.channel) != discord.DMChannel and type(message.channel) != discord.GroupChannel:\n try:\n if svc.should_show_deleted_in_guild(message.guild.id):\n if not message.author.bot:\n log_channel = svc.get_log_channel_in_guild(message.guild)\n file_channel = discord.utils.get(bot.guilds, 
name=\"zamirynth\").text_channels[0]\n if log_channel:\n await messages.member_deleted_message(message, log_channel, file_channel)\n except exceptions.UpdateError:\n pass\n if svc.should_save_messages_in_guild(message.guild.id):\n svc.increment_message_deleted(message.guild.id)\n with open(Token.get_log_path(message), 'a+', encoding=\"utf-8\") as file:\n file.seek(0, os.SEEK_END)\n if not file.tell():\n file.write(messages.USER_FILE_INFO.format(message.author))\n file.seek(0)\n file.write(messages.USER_MESSAGE_DELETED_TO_LOG.format(datetime.datetime.utcnow(), message))\n\n\n@bot.event\nasync def on_message_edit(b, a):\n if type(a.channel) != discord.DMChannel and type(a.channel) != discord.GroupChannel:\n if svc.should_show_edited_in_guild(a.guild.id):\n if not a.author.bot and b.content != a.content:\n log_channel = svc.get_log_channel_in_guild(a.guild)\n if log_channel:\n await log_channel.send(embed=messages.member_edited_message(b, a))\n\n if svc.should_save_messages_in_guild(a.guild.id):\n svc.increment_message_edited(a.guild.id)\n with open(Token.get_log_path(a), 'a+', encoding=\"utf-8\") as file:\n file.seek(0, os.SEEK_END)\n if not file.tell():\n file.write(messages.USER_FILE_INFO.format(a.author))\n file.seek(0)\n file.write(messages.USER_MESSAGE_EDITED_TO_LOG.format(b, a))\n await bot.process_commands(a)\n\n\n@bot.command(pass_context=True)\nasync def verify(ctx: discord.ext.commands.Context, *args):\n \"\"\"\n To verify yourself in the server using your date of birth.\n By using this command, you are giving consent to the recording of messages and files sent by you in this server.\n To use this command, you may have to use a password, set by the staff. This password may be in the rules.\n\n Example of this command with a password:\n ;verify password 01/01/0001\n\n Example of this command without a password (if none is set):\n ;verify 01/01/0001\n\n should_verify must be enabled in the settings of the bot (use ;svu to enable it).\n A 'verified' role must have been set (;help svr)\n A channel must be marked a the verification channel by using the ;svc command.\n If a log channel is set (;slc), a message will be posted there.\n \"\"\"\n await ctx.message.delete()\n if svc.should_verify(ctx.guild.id):\n if svc.channel_is_verification(ctx):\n vr = svc.get_verified_role_in_guild(ctx.guild)\n if not vr:\n return await ctx.send(\"There is no role setup for the verification\")\n log_channel = svc.get_log_channel_in_guild(ctx.guild)\n password = svc.get_password_in_guild(ctx.guild.id)\n if password:\n if len(args) < 2:\n return await ctx.send(\"Missing arguments. Have you put the password and your date of birth?\")\n elif len(args) > 2:\n return await ctx.send(\"Too many arguments. Do `;help verify` to learn how to use this command\")\n dob = 1\n if args[0] != password:\n return await ctx.send(\"Password invalid\")\n else:\n if len(args) < 1:\n return await ctx.send(\"Missing arguments. Have you put your date of birth?\")\n elif len(args) > 1:\n return await ctx.send(\"Too many arguments. 
You do not need to use a password, just put your date \"\n \"of birth in the `DD/MM/YYYY` format\")\n dob = 0\n try:\n bd_verification.verify_birthday(args[dob])\n svc.increment_verified_user(ctx.guild.id)\n await ctx.author.add_roles(vr)\n await ctx.author.send(messages.USER_IS_VERIFIED)\n if log_channel:\n await log_channel.send(embed=messages.member_is_verified(ctx.message, args[dob]))\n except ValueError:\n await ctx.send(messages.INPUT_NOT_VALID.format(\"date\"))\n except bd_verification.Invalid as e:\n await ctx.send(e)\n except bd_verification.Underaged as e:\n svc.increment_underaged_user(ctx.guild.id)\n await ctx.author.send(e)\n if log_channel:\n await log_channel.send(embed=messages.member_is_underaged(ctx.message, args[dob]))\n await ctx.message.author.kick(reason=messages.USER_IS_UNDERAGED.format(\n ctx.message))\n else:\n await ctx.send(\"You cannot use this command in this channel\")\n else:\n await ctx.send(\"This command is not enabled\")\n\n\n@verify.error\nasync def verify_error(ctx, error):\n svc.increment_retarded_user(ctx.guild.id)\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(\"Missing arguments, please make sure you include all arguments in the command\")\n elif isinstance(error, commands.BotMissingPermissions):\n await ctx.send(\"I don't have the permissions required to do this. (missing: {})\".format(error.missing_perms))\n else:\n await ctx.send(\"There was an unexpected error\")\n\n\n@bot.event\nasync def on_raw_reaction_add(payload: discord.RawReactionActionEvent):\n if payload.user_id != bot.user.id:\n r = svc.get_role_from_payload(payload, bot.get_guild(payload.guild_id))\n if not r:\n return\n u = bot.get_guild(payload.guild_id).get_member(payload.user_id)\n if not u:\n return await bot.get_user(152543367937392640) \\\n .send(\"User not found in `on_raw_reaction_add`\")\n await u.add_roles(r)\n\n\n@bot.event\nasync def on_raw_reaction_remove(payload: discord.RawReactionActionEvent):\n if payload.user_id != bot.user.id:\n r = svc.get_role_from_payload(payload, bot.get_guild(payload.guild_id))\n if not r:\n return\n u = bot.get_guild(payload.guild_id).get_member(payload.user_id)\n if not u:\n return await bot.get_user(152543367937392640) \\\n .send(\"User not found in `on_raw_reaction_remove`\")\n await u.remove_roles(r)\n\n\ndef main():\n extensions = ['cogs.settings',\n 'cogs.moderations',\n 'cogs.fun_stuff'\n ]\n for e in extensions:\n try:\n bot.load_extension(e)\n except Exception as error:\n print(f\"failed to load extension {e}.\")\n traceback.print_exc()\n try:\n token = Token.get_token()\n bot.run(token)\n except FileNotFoundError:\n print(\"Token not found\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"186714810","text":"from expungeservice.models.charge_types.misdemeanor import Misdemeanor\nfrom expungeservice.models.expungement_result import EligibilityStatus\nfrom expungeservice.models.charge_types.sex_crimes import SexCrime, RomeoAndJulietIneligibleSexCrime\nfrom expungeservice.models.helpers.record_merger import RecordMerger\nfrom tests.factories.charge_factory import ChargeFactory\nfrom tests.models.test_charge import Dispositions\nimport pytest\n\n\n@pytest.mark.parametrize(\"sex_crimes_statute\", SexCrime.statutes)\ndef test_sex_crimes(sex_crimes_statute):\n sex_crime_convicted = ChargeFactory.create(\n name=\"Generic\", 
statute=sex_crimes_statute, level=\"Felony Class B\", disposition=Dispositions.CONVICTED\n )\n assert isinstance(sex_crime_convicted, SexCrime)\n assert sex_crime_convicted.type_eligibility.status is EligibilityStatus.INELIGIBLE\n assert sex_crime_convicted.type_eligibility.reason == \"Ineligible under 137.225(6)(a)\"\n\n\n@pytest.mark.parametrize(\"sex_crimes_statute\", SexCrime.romeo_and_juliet_exceptions)\ndef test_sex_crimes_with_romeo_and_juliet_exception(sex_crimes_statute):\n charges = ChargeFactory.create_ambiguous_charge(\n name=\"Generic\", statute=sex_crimes_statute, level=\"Misdemeanor Class A\", disposition=Dispositions.CONVICTED\n )\n type_eligibility = RecordMerger.merge_type_eligibilities(charges)\n assert isinstance(charges[0], RomeoAndJulietIneligibleSexCrime)\n assert isinstance(charges[1], Misdemeanor)\n assert type_eligibility.status is EligibilityStatus.NEEDS_MORE_ANALYSIS\n assert type_eligibility.reason == \"Failure to meet requirements under 163A.140(1) OR Eligible under 137.225(5)(b)\"\n","sub_path":"src/backend/tests/models/charge_types/test_sex_crimes.py","file_name":"test_sex_crimes.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"494913287","text":"from chunked_uploads.models import Upload\nfrom django.core.management.base import BaseCommand\nimport datetime\n\n\nclass Command(BaseCommand):\n help = 'Clean failed and uncompleted uploads started 3 days ago or before'\n\n def handle(self, *args, **options):\n old = 3\n\n print(\"\\nYou have request to delete all failed and uncompleted uploads\\nstarted \" + str(old) + \" days ago or before.\")\n\n uncompleted_uploads = Upload.objects.filter(state=1)\n\n if len(uncompleted_uploads) == 0:\n print (\"\\nNothing to delete\")\n else:\n #tests if the uncompleted upload is more than 3 days old, if it is : deletes\n for upload in uncompleted_uploads:\n if (datetime.datetime.now() - upload.created_at) > datetime.timedelta(days=old):\n upload.delete()\n\n print (\"\\nDone\")\n","sub_path":"chunked_uploads/management/commands/clean_uploads.py","file_name":"clean_uploads.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"118203773","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nx = np.arange(-np.pi, np.pi, 0.01)\n\nfig = plt.figure()\nfig.suptitle('Main figure title')\n\nax1 = fig.add_subplot(311, title='Subplot 1 title')\nax1.plot(x, np.sin(x))\n\nax2 = fig.add_subplot(312)\nax2.set_title('Subplot 2 title')\nax2.plot(x, np.cos(x))\n\nax3 = fig.add_subplot(313)\nax3.set_title('Subplot 3 title')\nax3.plot(x, np.tan(x))\n\nplt.show()","sub_path":"python/tk/test4.py","file_name":"test4.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"492493040","text":"from django.views.generic.list import ListView\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.base import TemplateView\nfrom chat.models import Room, MessageImage\nfrom django.shortcuts import get_object_or_404, render\nfrom django.http import JsonResponse, HttpResponse, HttpResponseRedirect\nfrom django.core.cache import cache\nfrom channels.layers import get_channel_layer\nfrom asgiref.sync import async_to_sync\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom 
django.db.models import Q\nfrom django.contrib.auth.models import User\n\nclass AllRoomsView(TemplateView, LoginRequiredMixin):\n\ttemplate_name = 'room_list.html'\n\nclass NewRoomView(TemplateView, LoginRequiredMixin):\n\ttemplate_name = 'new_room.html'\n\n\n@login_required\ndef room_list(request, room_id):\n\troom = get_object_or_404(Room, id = room_id)\n\tif request.user in room.all_members():\n\t\treturn render(request, 'messages_list.html', {'id': room_id})\n\telse:\n\t\treturn HttpResponseRedirect('/chat/rooms/')\n\n@login_required\ndef ajax_images_sending(request, room_id):\n\tif request.method == 'POST' and request.is_ajax:\n\t\troom = get_object_or_404(Room, id = room_id)\n\t\timages = request.FILES.getlist('images-input')\n\t\tid_list = list()\n\t\tfor image in images:\n\t\t\tpict = MessageImage.objects.create(room = room, image = image)\n\t\t\tid_list.append(pict.id)\n\t\treturn JsonResponse({'id_list':id_list})\n\n@login_required\ndef ajax_get_users(request, room_id):\n\tif request.is_ajax:\n\t\troom = get_object_or_404(Room, id = room_id)\n\t\tusers = room.all_members()\n\t\tif not request.user in users:\n\t\t\treturn HttpResponse(403)\n\t\tusers_list = list()\n\t\tfor user in users:\n\t\t\tonline = cache.get('user_online_' + str(user.id))\n\t\t\tuser_dict = {'first_name': user.first_name, \n\t\t\t\t\t\t'last_name': user.last_name, \n\t\t\t\t\t\t'online': online, \n\t\t\t\t\t\t'id': user.id,\n\t\t\t\t\t\t'avatar': user.chatprofile.avatar_url()}\n\t\t\tusers_list.append(user_dict)\n\t\treturn JsonResponse({'users': users_list})\n\n\n@login_required\ndef ajax_get_rooms(request):\n\tif request.is_ajax:\n\t\trooms = Room.objects.filter(member = request.user)\n\t\trooms_list = list()\n\t\tfor room in rooms:\n\t\t\troom_dict = {'id': room.id, \n\t \t\t\t\t 'title_name': room.title_name(),\n\t \t\t\t\t 'all_members': room.all_members_list(), \n\t \t\t\t\t 'last_message_title': room.last_message_title(request.user)\n\t\t\t}\n\t\t\trooms_list.append(room_dict)\n\t\treturn JsonResponse({'rooms': rooms_list})\n\n@login_required\ndef ajax_search_users(request):\n\tif request.is_ajax:\n\t\tsearchText = request.POST['search']\n\t\tmatched_users = User.objects.filter(Q(first_name__contains = searchText) | Q (last_name__contains = searchText)).exclude(id = request.user.id)\n\t\tusers_list = list()\n\t\tfor user in matched_users:\n\t\t\tusers_list.append(user.chatprofile.user_dict())\n\t\treturn JsonResponse({'matched_users': users_list})\n\n@login_required\ndef ajax_potential_members(request, room_id):\n\tif request.is_ajax:\n\t\troom = get_object_or_404(Room, id = room_id)\n\t\tsearchMember = request.POST['search']\n\t\tmatched = User.objects.filter(Q(first_name__contains = searchMember) | Q (last_name__contains = searchMember)).exclude(id = request.user.id)\n\t\tpotential_members = list()\n\t\tfor m in matched:\n\t\t\tif m not in room.all_members():\n\t\t\t\tpotential_members.append(m.chatprofile.user_dict())\n\t\treturn JsonResponse({'potential_members': potential_members})\n\n@login_required\ndef ajax_create_chat(request):\n\tif request.is_ajax:\n\t\troom = Room.objects.create()\n\t\tid_list = request.POST['ids'].split(',')\n\t\tfor one_id in id_list:\n\t\t\tmember = get_object_or_404(User, id = one_id)\n\t\t\troom.member.add(member)\n\t\troom.member.add(request.user)\n\t\treturn JsonResponse({'room_id': 
room.id})","sub_path":"chat/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"284882298","text":"import geomstats.backend as gs\nfrom geomstats.geometry.matrices import Matrices\nfrom geomstats.geometry.spd_matrices import SPDMatrices\nfrom geomstats.geometry.special_orthogonal import SpecialOrthogonal\nfrom geomstats.geometry.symmetric_matrices import SymmetricMatrices\nfrom tests.data_generation import TestData\n\nrand = gs.random.rand\n\n\nclass BackendsTestData(TestData):\n def _array_data(self):\n func_name = \"array\"\n\n args = [\n ([],),\n (1.5,),\n (gs.array(1.5),),\n ([gs.ones(2), gs.ones(2)],),\n ([gs.ones(1), gs.ones(1)],),\n ([gs.ones(2), [0, 0]],),\n ]\n\n return [{\"func_name\": func_name, \"args\": args_} for args_ in args]\n\n def _additional_array_data(self):\n\n data = [\n dict(func_name=\"zeros\", args=(2,)),\n dict(func_name=\"zeros\", args=((2, 2),)),\n dict(func_name=\"ones\", args=(2,)),\n dict(func_name=\"ones\", args=((2, 2),)),\n ]\n\n return data\n\n def array_like_np_test_data(self):\n smoke_data = []\n\n smoke_data += self._array_data()\n smoke_data += self._additional_array_data()\n\n return self.generate_tests(smoke_data)\n\n def _einsum_data(self):\n func_name = \"einsum\"\n\n args = [\n (\"...i,...i->...\", rand(2, 2), rand(2, 2)),\n (\"...i,...i->...\", rand(2, 2), rand(2)),\n (\"...i,...i->...\", rand(2), rand(2, 2)),\n (\"...i,...i->...\", rand(2), rand(2)),\n (\"...,...i->...i\", rand(1), rand(1, 3)),\n (\"...,...i->...i\", rand(1), rand(3)),\n (\"...,...i->...i\", 5.0, rand(1, 3)),\n (\"...,...i->...i\", 5.0, rand(3)),\n (\"...,...i->...i\", rand(3), rand(1, 3)),\n (\"...,...i->...i\", rand(3), rand(3)),\n (\"...ij,...ik->...jk\", rand(3, 2, 2), rand(3, 2, 2)),\n (\"...ij,...ik->...jk\", rand(2, 2), rand(3, 2, 2)),\n (\"...ij,...ik->...jk\", rand(3, 2, 2), rand(2, 2)),\n (\"...ij,...ik->...jk\", rand(2, 2), rand(2, 2)),\n (\"...i,...ijk->...jk\", rand(3), rand(3, 3, 3)),\n (\"...i,...ijk->...jk\", rand(3), rand(1, 3, 3, 3)),\n (\"...i,...ijk->...jk\", rand(2, 3), rand(2, 3, 3, 3)),\n (\"...i,...ijk->...jk\", rand(2, 3), rand(3, 3, 3)),\n (\"...k,...j,...i->...kji\", rand(3), rand(3), rand(3)),\n (\"...k,...j,...i->...kji\", rand(2, 3), rand(3), rand(3)),\n ]\n return [{\"func_name\": func_name, \"args\": args_} for args_ in args]\n\n def _logm_expm_data(self, func_name=\"linalg.logm\"):\n arrays = [\n Matrices.to_diagonal(rand(3, 3)),\n # TODO: uncomment or delete?\n # Matrices.to_symmetric(rand(3, 3)),\n # rand(3, 3),\n ]\n return [dict(func_name=func_name, a=array) for array in arrays]\n\n def func_like_np_test_data(self):\n smoke_data = []\n smoke_data += self._einsum_data()\n\n return self.generate_tests(smoke_data)\n\n def unary_op_like_np_test_data(self):\n smoke_data = [\n dict(func_name=\"trace\", a=rand(2, 2)),\n dict(func_name=\"trace\", a=rand(3, 3)),\n dict(func_name=\"linalg.cholesky\", a=SPDMatrices(3).random_point()),\n dict(func_name=\"linalg.eigvalsh\", a=SymmetricMatrices(3).random_point()),\n ]\n return self.generate_tests(smoke_data)\n\n def unary_op_like_scipy_test_data(self):\n smoke_data = []\n smoke_data += self._logm_expm_data()\n smoke_data += self._logm_expm_data(\"linalg.expm\")\n\n return self.generate_tests(smoke_data)\n\n def unary_op_vec_test_data(self):\n smoke_data = [\n dict(func_name=\"trace\", a=rand(3, 3)),\n dict(func_name=\"linalg.cholesky\", a=SPDMatrices(3).random_point()),\n 
dict(func_name=\"linalg.eigvalsh\", a=SymmetricMatrices(3).random_point()),\n ]\n smoke_data += self._logm_expm_data()\n smoke_data += self._logm_expm_data(\"linalg.expm\")\n\n return self.generate_tests(smoke_data)\n\n def binary_op_like_np_test_data(self):\n smoke_data = [\n dict(func_name=\"matmul\", a=rand(2, 2), b=rand(2, 2)),\n dict(func_name=\"matmul\", a=rand(2, 3), b=rand(3, 2)),\n dict(func_name=\"outer\", a=rand(3), b=rand(3)),\n dict(func_name=\"outer\", a=rand(3), b=rand(4)),\n dict(func_name=\"dot\", a=rand(3), b=rand(3)),\n dict(func_name=\"cross\", a=rand(3), b=rand(3)),\n ]\n\n return self.generate_tests(smoke_data)\n\n def binary_op_like_einsum_test_data(self):\n smoke_data = [\n dict(func_name=\"matvec\", a=rand(3, 3), b=rand(3), einsum_expr=\"ij,j->i\")\n ]\n\n return self.generate_tests(smoke_data)\n\n def binary_op_vec_test_data(self):\n smoke_data = [\n dict(func_name=\"matmul\", a=rand(3, 4), b=rand(4, 3)),\n dict(func_name=\"matmul\", a=rand(3, 3), b=rand(3, 3)),\n dict(func_name=\"outer\", a=rand(3), b=rand(3)),\n dict(func_name=\"outer\", a=rand(3), b=rand(4)),\n dict(func_name=\"matvec\", a=rand(3, 3), b=rand(3)),\n dict(func_name=\"matvec\", a=rand(4, 3), b=rand(3)),\n dict(func_name=\"dot\", a=rand(3), b=rand(3)),\n dict(func_name=\"cross\", a=rand(3), b=rand(3)),\n ]\n\n return self.generate_tests(smoke_data)\n\n def binary_op_vec_raises_error_test_data(self):\n return self.binary_op_vec_test_data()\n\n def binary_op_raises_error_test_data(self):\n smoke_data = [\n dict(func_name=\"matmul\", a=rand(1), b=rand(1)),\n dict(func_name=\"matmul\", a=rand(2, 3, 3), b=rand(2, 3)),\n dict(func_name=\"matmul\", a=rand(2, 3, 3), b=rand(3, 3, 3)),\n dict(func_name=\"matvec\", a=rand(3, 2), b=rand(3)),\n dict(func_name=\"dot\", a=rand(4), b=rand(3)),\n dict(func_name=\"dot\", a=rand(3, 4), b=rand(3)),\n dict(func_name=\"cross\", a=rand(4), b=rand(4)),\n ]\n\n return self.generate_tests(smoke_data)\n\n def binary_op_runs_test_data(self):\n smoke_data = []\n\n return self.generate_tests(smoke_data)\n\n def _pad_data(self):\n func_name = \"pad\"\n\n n, m = 2, 3\n args = [\n (gs.ones((n, n)), [[0, 1], [0, 1]]),\n (gs.ones((n, n)), [[0, 1], [0, 0]]),\n (gs.ones((m, n, n)), [[0, 0], [0, 1], [0, 1]]),\n ]\n expected = [(n + 1, n + 1), (n + 1, n), (m, n + 1, n + 1)]\n\n return [\n {\"func_name\": func_name, \"args\": args_, \"expected\": expected_}\n for args_, expected_ in zip(args, expected)\n ]\n\n def func_out_shape_test_data(self):\n smoke_data = []\n\n smoke_data += self._pad_data()\n\n return self.generate_tests(smoke_data)\n\n def func_out_type_test_data(self):\n smoke_data = [\n dict(func_name=\"shape\", args=(gs.ones(3),), expected=tuple),\n ]\n\n return self.generate_tests(smoke_data)\n\n def func_out_bool_test_data(self):\n smoke_data = [\n dict(func_name=\"is_array\", args=[gs.ones(2)], expected=True),\n dict(func_name=\"is_array\", args=([1, 2],), expected=False),\n dict(func_name=\"is_array\", args=(1,), expected=False),\n ]\n\n return self.generate_tests(smoke_data)\n\n def _take_data(self):\n func_name = \"take\"\n\n vec = gs.array([0, 1])\n indices = gs.array([0, 0, 1])\n # mat = gs.array(\n # [\n # [0, 1],\n # [2, 3],\n # ]\n # )\n data = [\n dict(func_name=func_name, args=[vec, indices], expected=indices),\n # TODO: uncomment after test refactor merge\n # dict(func_name=func_name, args=[mat, indices],\n # expected=gs.array([[0, 1]] * 2 + [[2, 3]]),\n # axis=0),\n # dict(func_name=func_name, args=[mat, indices],\n # expected=gs.transpose(gs.array([[0, 2]] * 2 
+ [[1, 3]])),\n # axis=1),\n # dict(func_name=func_name, args=[mat, 0], expected=gs.array([0, 2]),\n # axis=1)\n ]\n\n return data\n\n def func_out_allclose_test_data(self):\n smoke_data = [\n dict(\n func_name=\"linalg.logm\",\n args=[gs.array([[2.0, 0.0, 0.0], [0.0, 3.0, 0.0], [0.0, 0.0, 4.0]])],\n expected=gs.array(\n [\n [0.693147180, 0.0, 0.0],\n [0.0, 1.098612288, 0.0],\n [0.0, 0.0, 1.38629436],\n ]\n ),\n ),\n dict(\n func_name=\"linalg.logm\",\n args=[gs.array([[1.0, 0.0, 0.0], [0.0, 5.0, 0.0], [0.0, 0.0, 6.0]])],\n expected=gs.array(\n [[0.0, 0.0, 0.0], [0.0, 1.609437912, 0.0], [0.0, 0.0, 1.79175946]]\n ),\n ),\n dict(\n func_name=\"linalg.expm\",\n args=[\n gs.array(\n [[2.0, 0.0, 0.0], [0.0, 3.0, 0.0], [0.0, 0.0, 4.0]],\n )\n ],\n expected=gs.array(\n [\n [7.38905609, 0.0, 0.0],\n [0.0, 20.0855369, 0.0],\n [0.0, 0.0, 54.5981500],\n ]\n ),\n ),\n # TODO: uncomment or delete?\n # dict(\n # func_name=\"linalg.expm\",\n # args=[gs.array([[1.0, 0.0, 0.0], [0.0, 5.0, 0.0], [0.0, 0.0, 6.0]])],\n # expected=gs.array(\n # [\n # [2.718281828, 0.0, 0.0],\n # [0.0, 148.413159, 0.0],\n # [0.0, 0.0, 403.42879349],\n # ],\n # ),\n # ),\n ]\n smoke_data += self._take_data()\n\n return self.generate_tests(smoke_data)\n\n def func_out_equal_test_data(self):\n smoke_data = [\n dict(func_name=\"shape\", args=(1,), expected=()),\n dict(func_name=\"shape\", args=([1, 2],), expected=(2,)),\n dict(func_name=\"shape\", args=(gs.ones(3),), expected=(3,)),\n dict(func_name=\"shape\", args=(gs.ones((3, 3)),), expected=(3, 3)),\n dict(func_name=\"take\", args=[gs.array([0, 1]), 0], expected=0),\n ]\n\n return self.generate_tests(smoke_data)\n\n def compose_with_inverse_test_data(self):\n smoke_data = [\n dict(\n func_name_1=\"linalg.logm\",\n func_name_2=\"linalg.expm\",\n a=Matrices.to_diagonal(rand(3, 3)),\n ),\n dict(\n func_name_1=\"linalg.expm\",\n func_name_2=\"linalg.logm\",\n a=Matrices.to_diagonal(rand(3, 3)),\n ),\n dict(\n func_name_1=\"linalg.logm\",\n func_name_2=\"linalg.expm\",\n a=SpecialOrthogonal(n=3).random_point(2),\n ),\n ]\n return self.generate_tests(smoke_data)\n","sub_path":"tests/data/backends_data.py","file_name":"backends_data.py","file_ext":"py","file_size_in_byte":11125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"571141051","text":"import tensorflow as tf\nimport numpy as np\nimport random\n\n# Reading data\ndatas = []\nwith open('datasets/ntu_ml_foundation_hw1/hw1_15_train.dat') as f:\n for line in f:\n line_lst = line.split()\n now_x = np.array(list(map(float, line_lst[:4])), dtype=np.float32)\n now_y = np.array(float(line_lst[4]), dtype=np.float32)\n datas.append((now_x, now_y))\n\nprint('training datasets size:', len(datas))\nprint('===================================================')\n\n\n# Init model\nsess = tf.InteractiveSession()\nW = tf.Variable([.0, .0, .0, .0], tf.float32)\nb = tf.Variable([.0], tf.float32)\nx = tf.placeholder(tf.float32, [4])\nlinear_model = tf.reduce_sum(W * x) + b\n\ntf.global_variables_initializer().run()\n\n\ndef pla_cycle(alpha=1.0):\n input_x = [dt[0] for dt in datas]\n target_y = [dt[1] for dt in datas]\n converge = False\n update_num = 0\n while not converge:\n converge = True\n for i in range(len(input_x)):\n predicted_y = linear_model.eval({x: input_x[i]})\n if (predicted_y > 0 and target_y[i] > 0) \\\n or (predicted_y <= 0 and target_y[i] <= 0):\n continue\n update_num += 1\n converge = False\n tf.assign(W, W + alpha * target_y[i] * input_x[i]).eval()\n tf.assign(b, b + alpha * 
target_y[i]).eval()\n return update_num\n\n# Perceptron Learning Algorithm: naive cycle (1)\nprint('Naive cycle (1)')\nprint('update_num:', pla_cycle())\nprint('===================================================')\n\n\n# Perceptron Learning Algorithm: naive cycle (0.25)\nprint('Naive cycle (0.25)')\ntf.assign(W, [.0, .0, .0, .0]).eval()\ntf.assign(b, [.0]).eval()\nprint('update_num:', pla_cycle(0.25))\nprint('===================================================')\n\n\n# Pre-determined random cycles (1)\nprint('Pre-determined random cycles (1)')\nwrong_statistic = []\nfor t in range(5):\n tf.assign(W, tf.random_normal([4], stddev=0.01)).eval()\n tf.assign(b, tf.random_normal([1], stddev=0.01)).eval()\n random.shuffle(datas)\n wrong_statistic.append(pla_cycle())\n print('update_num:', wrong_statistic[-1])\nprint('avg update_num:', np.average(wrong_statistic))\nprint('===================================================')\n\n\n# Pre-determined random cycles (0.25)\nprint('Pre-determined random cycles (0.25)')\nwrong_statistic = []\nfor t in range(5):\n tf.assign(W, tf.random_normal([4], stddev=0.01)).eval()\n tf.assign(b, tf.random_normal([1], stddev=0.01)).eval()\n random.shuffle(datas)\n wrong_statistic.append(pla_cycle(0.25))\n print('update_num:', wrong_statistic[-1])\nprint('avg update_num:', np.average(wrong_statistic))\nprint('===================================================')\n","sub_path":"1-0-linear-seperable-pla.py","file_name":"1-0-linear-seperable-pla.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"5836855","text":"\"\"\"\noperation.py - the operations which make up the executable words of ActorForth.\n\nINTRO 6 : Named words which implement the behavior of ActorForth. New words\n can be built (presently only global words for 'Any' type) and are\n first class citizens as if they were primitives. Operations are\n stored in various Type dictionaries.\n\"\"\"\n\nimport logging\nfrom typing import Dict, List, Tuple, Callable, Any, Optional, Sequence\nfrom dataclasses import dataclass\nfrom itertools import zip_longest\n\nfrom aftype import AF_Type, AF_Continuation, StackObject, Symbol\n\nfrom stack import Stack\n\nclass SigValueTypeMismatchException(Exception): pass\n\nclass TypeSignature:\n\n def __init__(self, in_seq: Sequence[\"StackObject\"] = None, out_seq: Sequence[\"StackObject\"] = None ):\n if in_seq is None: in_seq = []\n if out_seq is None: out_seq = []\n\n self.stack_in : Stack = Stack(in_seq)\n self.stack_out : Stack = Stack(out_seq)\n\n\n # Produces a mapped type sequence that accounts for \"Any\" types.\n # Will fail an assertion if the types don't match.\n def map_from_input_sig(self, sig: Sequence[StackObject]) -> Sequence[StackObject]:\n result_sig : List[StackObject] = []\n assert len(self.stack_in) <= len(sig), \"Error! In Stack '%s' longer than Sig '%s'.\" % (self.stack_in,sig)\n\n # Iterate over both sequences in reverse.\n in_s : StackObject\n m_s : StackObject\n for in_s, m_s in zip(self.stack_in.contents()[::-1],sig[::-1]):\n # Upgrade Generic types to whatever they're being paired with.\n logging.debug(\"in_s type is '%s' : %s.\" % (type(in_s), in_s) )\n logging.debug(\"m_s type is '%s' : %s.\" % (type(m_s),m_s) )\n\n match_type : AF_Type = m_s.stype\n if match_type.is_generic():\n m_s.stype = in_s.stype\n elif in_s.stype.is_generic():\n in_s.stype = match_type\n\n assert m_s.stype == in_s.stype, \"Error! 
Input Type '%s' not equivalent to Sig Type '%s' for In Stack = %s matched with Sig %s.\" % (in_s, m_s, self.stack_in, sig)\n\n ### TODO: Confirm stack content here!!!\n\n result_sig.insert(0,m_s)\n return result_sig\n\n\n # Used by the runtime interpreter to check for matching types for words.\n def match_in(self, stack: Stack) -> bool:\n logging.debug(\"match_in in_s=%s, matching against stack=%s\" % (self.stack_in, stack))\n try:\n result = self.map_from_input_sig(stack.contents())\n logging.debug(\"match_in returns True.\")\n return True\n except AssertionError:\n logging.debug(\"match_in returns False.\")\n return False\n\n\n def __str__(self) -> str:\n out = \"TSig([\"\n for t in self.stack_in.contents():\n out += \"t=%s\" % t.stype.name\n if t.value is not None:\n out += \", v='%s'\" % t.value\n out += ', '\n\n out += \"] -> [\"\n\n for t in self.stack_out.contents():\n out += \"t=%s\" % t.stype.name\n if t.value is not None:\n out += \", v='%s'\" % t.value\n out += ', '\n out += \"])\"\n return out\n\n def __repr__(self):\n return self.__str__() \n\n def __eq__(self, s : object) -> bool:\n if not isinstance(s, TypeSignature): return NotImplemented \n return (self.stack_in == s.stack_in) and (self.stack_out == s.stack_out)\n\n def __lt__(self, s: object) -> bool:\n if not isinstance(s, TypeSignature): return NotImplemented \n # Longest stack_in comes first.\n if self.stack_in.depth() > s.stack_in.depth(): return True\n if self.stack_in.depth() < s.stack_in.depth(): return False\n\n # Go ahead and eliminate equal stacks early on as a likely case.\n if self.stack_in == s.stack_in: return False\n\n # Stacks with the most TypeValues (prioritized top to bottom) come first.\n us = self.stack_in.copy()\n them = s.stack_in.copy()\n while us.depth():\n i = us.pop()\n j = them.pop()\n if i.stype < j.stype: return True\n if i.stype > j.stype: return False\n if i.value is None and j.value is None: continue\n if i.value is not None and j.value is None: return True\n if i.value is None and j.value is not None: return False\n\n # We only get here if all the types match and have values.\n # So we're left comparing values.\n us = self.stack_in.copy()\n them = s.stack_in.copy()\n while us.depth():\n i = us.pop()\n j = them.pop()\n if i.value < j.value : return True\n if i.value > j.value : return False\n\n # We should never get here because we already checked for\n # equal stacks earlier!\n return False\n\n\nOp_name = str\nOperation_def = Callable[[\"AF_Continuation\"],None]\n\nclass Operation:\n\n def __init__(self, name: Op_name, op: Operation_def, words: List[\"Operation\"] = None, sig: TypeSignature = None, symbol: Symbol = None) -> None:\n self.name = name\n self.the_op : Operation_def = op\n self.words : List[\"Operation\"] = words or []\n self.sig : TypeSignature = sig or TypeSignature([],[])\n self.symbol : Symbol = symbol or Symbol()\n\n def add_word(self, op: \"Operation\") -> bool:\n # Should check for valid stack type signature.\n self.words.append(op)\n return True\n\n def __call__(self, cont: \"AF_Continuation\") -> None:\n self.the_op(cont)\n\n def __str__(self) -> str:\n qualified_name = \"Anonymous\"\n try:\n qualified_name = self.the_op.__qualname__\n except AttributeError:\n pass \n result = \"Op{'%s' %s :(%s)\" % (self.name, self.sig, qualified_name)\n #result += \" words=%s\" % str(self.words)\n #result += \" from=%s\" % self.symbol.location\n result += \"}\"\n return result\n\n def __repr__(self) -> str:\n return self.__str__()\n\n def __lt__(self, o: object) -> bool:\n if 
not isinstance(o, Operation): return NotImplemented \n if self.name < o.name: return True\n return self.sig < o.sig\n\n def short_name(self) -> str:\n return self.name\n\n def check_stack_effect(self, context : Optional[ Stack ] = None, force_composite : bool = False) -> Tuple[ Stack, bool ]:\n \"\"\"\n force_composite is used for compiling new composite words that may not yet have a word \n in their word list so would otherwise appear as primitive words and return the final \n stack effect rather than the starting one which is appropriate when compiling.\n \"\"\"\n logging.debug(\"op: %s with context = %s.\" % (self, context) )\n start_stack : Stack\n match_stack : Stack\n\n # The start_stack is what we're trying to match with.\n if context is None:\n start_stack = self.sig.stack_in.copy()\n logging.debug(\"Use our default input stack signature instead: %s.\" % start_stack)\n else: \n start_stack = context.copy()\n\n # We're matching the start_stack against our input stack.\n match_stack = self.sig.stack_in.copy()\n matches : bool = True\n\n if len(self.sig.stack_in) > len(start_stack):\n logging.error(\"Input stack underrun! Match target len=%s:%s > match candidate len%s:%s\" % (len(self.sig.stack_in), self.sig.stack_in, len(start_stack), start_stack) )\n raise Exception(\"Stack Underrun!\")\n\n generic_map : Dict[\"AF_Type\", \"StackObject\"] = {}\n\n if len(self.words) == 0 and not force_composite:\n logging.debug(\"This is a primitive operation. Just consume, adjust for stack effect.\")\n for i in range(len(match_stack)):\n match = match_stack.pop()\n test = start_stack.pop()\n logging.debug(\"Testing match:%s against test:%s.\" % (match, test))\n\n # Fixup any Generic mappings.\n o : StackObject \n for o in [match, test]:\n if o.stype.is_generic() and generic_map.get(o.stype):\n o = generic_map[o.stype]\n\n # Upgrade Generic types if present.\n for (m,t) in [(match, test), (test,match)]:\n if m.stype.is_generic():\n logging.debug(\"Upgrading %s to test: %s.\" % (m,t))\n generic_map[m.stype] = t\n m = t\n\n # Check against value if match has a value!\n if match.value is not None:\n matches = (match.value == test.value)\n if not matches: \n msg = \"match.value(%s) != test.value(%s)!\" % (match.value, test.value)\n logging.debug(msg)\n raise SigValueTypeMismatchException(\"Value mis-match! %s\" % msg)\n\n # Check if the types match.\n matches = (match.stype == test.stype)\n if not matches: \n msg = \"match.stype(%s) != test.stype(%s)!\" % (match.stype, test.stype) \n logging.debug(msg)\n raise SigValueTypeMismatchException(\"Type mis-match! %s\" % msg)\n \n # Tack on the output stack effect that we're claiming. \n \n for o in self.sig.stack_out.contents():\n # See if we have a generic we need to specialize.\n # NOTE - we may need to loop over this until .get returns nothing\n # in case there's more than one level of mapping. \n # If so beware of infinite loops!\n if generic_map.get(o.stype):\n logging.debug(\"Specializing %s to %s.\" % (o,generic_map[o.stype]))\n o = generic_map[o.stype]\n logging.debug(\"adding output:%s\" % o)\n start_stack.push(o)\n\n logging.debug(\"Returning following stack effect for primitive word: %s, matches = %s.\" % (start_stack, matches))\n return start_stack, matches\n\n # We're composite word so walk through the stack effect for each one.\n logging.debug(\"Composite word so walking through implementation.\")\n last_word : Operation\n for word in self.words:\n if not matches:\n logging.error(\"This probably isn't possible. 
Broke match on word: %s.\" % last_word)\n raise Exception(\"Stack mis-match for composite word!\")\n last_word = word\n start_stack, matches = word.check_stack_effect(start_stack)\n\n logging.debug(\"Returning output stack: %s with matches = %s.\" % (start_stack, matches))\n return start_stack, matches\n\n\nOp_list = List[Operation]\n\nOp_map = List[Tuple[Sequence[\"StackObject\"],Operation]]\n\ndef op_nop(c: \"AF_Continuation\") -> None:\n pass \n","sub_path":"src/operation.py","file_name":"operation.py","file_ext":"py","file_size_in_byte":10985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"114047943","text":"import cv2\r\nimport numpy as np\r\n\r\ndef gaussianBlur(img):\r\n frame=cv2.GaussianBlur(img,(7,7),0)\r\n return frame\r\n\r\ndef rgbTOhsv(img):\r\n hsv=cv2.cvtColor(img,cv2.COLOR_BGR2HSV)\r\n return hsv\r\n\r\ndef masking(img,lower,upper):\r\n mask = cv2.inRange(img,lower,upper)\r\n return mask\r\n\r\ndef edgeDetection(img):\r\n edges=cv2.Canny(img,75,150)\r\n return edges\r\n\r\ndef lineDetection(img):\r\n lines=cv2.HoughLinesP(img,1,np.pi/180,50,maxLineGap=50)\r\n return lines\r\n\r\ndef main():\r\n \r\n video=cv2.VideoCapture(\"road_car_view.mp4\")\r\n\r\n while True:\r\n #reading a frame\r\n ret,frame=video.read()\r\n #applying gaussian blur to smooth the image for better results\r\n frame=gaussianBlur(frame)\r\n #converting to HSV color space so we can detect the color we want in the frame\r\n hsv=rgbTOhsv(frame)\r\n #setting lower and upper thresholds for the color to detect\r\n lower_yellow=np.array([10,94,140])\r\n upper_yellow=np.array([40,255,255])\r\n #creating the mask\r\n mask=masking(hsv,lower_yellow,upper_yellow)\r\n cv2.imshow(\"mask\",mask)\r\n\r\n #detecting edges\r\n edges=edgeDetection(mask)\r\n \r\n cv2.imshow(\"edges\",edges)\r\n #detecting hough lines\r\n lines=lineDetection(edges)\r\n #drawing lines\r\n if lines is not None:\r\n for line in lines:\r\n x1,y1,x2,y2=line[0]\r\n cv2.line(frame, (x1,y1), (x2,y2), (0,255,0), 3)\r\n #showing frame with drawn lines\r\n cv2.imshow(\"frame\",frame)\r\n #exit if ESC is pressed\r\n key=cv2.waitKey(25)\r\n if key==27:\r\n break\r\n #release video from memory \r\n video.release()\r\n cv2.destroyAllWindows()\r\n\r\n#calling main\r\nif __name__==\"__main__\":\r\n main()","sub_path":"DIP project/LaneDetection.py","file_name":"LaneDetection.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"83234746","text":"from collections import Counter\nx = [1,2,3,4,5,6,10]\n\ndef mean(x):\n return sum(x)/len(x)\n\nprint(mean(x))\n\ndef median(v):\n n = len(v)\n sorted_v = sorted(v)\n midpoint = n//2\n \n if n % 2 == 1:\n return sorted_v[midpoint]\n \n else:\n lo = midpoint-1\n hi = midpoint\n return (sorted_v[lo] + sorted_v[hi]) / 2\n \nprint(median(x))\n \ndef quantile(x, p):\n p_index = int(p*len(x))\n return sorted(x)[p_index]\n\nprint(quantile(x, 0.4))\n\n# mode (the most frequent value)\ndef mode(x):\n counts = Counter(x)\n max_count = max(counts.values())\n return [x_i for x_i, count in counts.items() if count == max_count]\n\n# If every value appears exactly once, all values are returned as a list. 
dict -> keys(), values(), items()\nprint(mode(x))\n\n\n","sub_path":"PYTHON_Project/Data_Science/chapter5_central_tendencies.py","file_name":"chapter5_central_tendencies.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"177764636","text":"#!/usr/bin/python\n#\n# Copyright 2018-2022 Polyaxon, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import List, Optional\n\nfrom polyaxon.auxiliaries import V1PolyaxonInitContainer\nfrom polyaxon.containers.names import (\n INIT_DOCKERFILE_CONTAINER_PREFIX,\n generate_container_name,\n)\nfrom polyaxon.contexts import paths as ctx_paths\nfrom polyaxon.k8s import k8s_schemas\nfrom polyaxon.polypod.common import constants\nfrom polyaxon.polypod.common.containers import sanitize_container\nfrom polyaxon.polypod.common.env_vars import get_run_instance_env_var\nfrom polyaxon.polypod.common.mounts import (\n get_auth_context_mount,\n get_connections_context_mount,\n)\nfrom polyaxon.polypod.common.volumes import get_volume_name\nfrom polyaxon.polypod.specs.contexts import PluginsContextsSpec\nfrom polyaxon.schemas.types import V1DockerfileType\nfrom polyaxon.utils.list_utils import to_list\n\n\ndef get_dockerfile_init_container(\n polyaxon_init: V1PolyaxonInitContainer,\n dockerfile_args: V1DockerfileType,\n contexts: PluginsContextsSpec,\n run_path: str,\n run_instance: str,\n env: List[k8s_schemas.V1EnvVar] = None,\n mount_path: Optional[str] = None,\n) -> k8s_schemas.V1Container:\n env = to_list(env, check_none=True)\n env = env + [get_run_instance_env_var(run_instance)]\n\n volume_name = (\n get_volume_name(mount_path) if mount_path else constants.VOLUME_MOUNT_ARTIFACTS\n )\n mount_path = mount_path or ctx_paths.CONTEXT_MOUNT_ARTIFACTS\n volume_mounts = [\n get_connections_context_mount(name=volume_name, mount_path=mount_path)\n ]\n if contexts and contexts.auth:\n volume_mounts.append(get_auth_context_mount(read_only=True))\n\n container = k8s_schemas.V1Container(\n name=generate_container_name(INIT_DOCKERFILE_CONTAINER_PREFIX),\n image=polyaxon_init.get_image(),\n image_pull_policy=polyaxon_init.image_pull_policy,\n command=[\"polyaxon\", \"docker\", \"generate\"],\n args=[\n \"--build-context={}\".format(dockerfile_args.to_dict(dump=True)),\n \"--destination={}\".format(mount_path),\n \"--copy-path={}\".format(\n ctx_paths.CONTEXT_MOUNT_RUN_OUTPUTS_FORMAT.format(run_path)\n ),\n \"--track\",\n ],\n env=env,\n resources=polyaxon_init.get_resources(),\n volume_mounts=volume_mounts,\n )\n return sanitize_container(container)\n","sub_path":"core/polyaxon/polypod/init/dockerfile.py","file_name":"dockerfile.py","file_ext":"py","file_size_in_byte":2924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"495485036","text":"def some(items, k):\n for i in range(len(items)):\n for j in range(len(items)):\n if i!=j:\n if items[i]+items[j] == k:\n return True\n return False\n\n\ndef some2(items, 
k):\n \"\"\"\n This uses a dictionary to store complements and\n thus has to traverse the list only once to get the\n pair of sums if available.\n \"\"\"\n dic = {}\n for i in items:\n if i in dic:\n print(\"Items: {}, {}\".format(i, dic[i]))\n print(\"Their indices: {}, {}\".format(items.index(i), items.index(dic[i])))\n return True\n else:\n dic[k-i] = i\n return False\n\n\nitems = [10, 15, 3, 7]\nk = 17\n# print(some(items, k))\nprint(some2(items, k))\n","sub_path":"Daily1.py","file_name":"Daily1.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"201073070","text":"\"\"\"\\\nwxSpinCtrl widget configuration\n\n@copyright: 2014-2016 Carsten Grohmann\n@copyright: 2018-2021 Dietmar Schwertberger\n@license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY\n\"\"\"\n\n# this contained many styles from wxTextCtrl -> drop on loading\nconfig = {\n 'wxklass': 'wxSpinCtrlDouble',\n 'style_defs': {\n 'wxSP_ARROW_KEYS': { 'desc': _('The user can use arrow keys to change the value.') },\n 'wxSP_WRAP': { 'desc': _('The value wraps at the minimum and maximum.') },\n 'wxALIGN_LEFT': {\n 'desc': _('Same as wxTE_LEFT for wxTextCtrl: the text is left aligned.'),\n 'exclude': 'wxALIGN_CENTRE_HORIZONTAL|wxALIGN_RIGHT',\n 'supported_by': ('wx3',) },\n 'wxALIGN_CENTRE_HORIZONTAL': {\n 'desc': _('Same as wxTE_CENTRE for wxTextCtrl: the text is centered.'),\n 'exclude': 'wxALIGN_LEFT|wxALIGN_RIGHT',\n 'supported_by': ('wx3',) },\n 'wxALIGN_RIGHT': {\n 'desc': _('Same as wxTE_RIGHT for wxTextCtrl: the text is right aligned (this is the 
default).'),\n 'exclude': 'wxALIGN_LEFT|wxALIGN_CENTRE_HORIZONTAL',\n 'supported_by': ('wx3',) },\n 'wxTE_PROCESS_ENTER': {\n 'desc': _('The control will generate the event wxEVT_TEXT_ENTER (otherwise pressing Enter key is '\n 'either processed internally by the control or used for navigation between dialog controls).'), },\n 'wxTE_PROCESS_TAB': {'obsolete':True},\n 'wxTE_MULTILINE': {'obsolete':True},\n 'wxTE_PASSWORD': {'obsolete':True},\n 'wxTE_READONLY': {'obsolete':True},\n 'wxTE_RICH': {'obsolete':True},\n 'wxTE_RICH2': {'obsolete':True},\n 'wxTE_AUTO_URL': {'obsolete':True},\n 'wxTE_NOHIDESEL': {'obsolete':True},\n 'wxHSCROLL': {'obsolete':True},\n 'wxTE_NO_VSCROLL': {'obsolete':True},\n 'wxTE_LEFT': {'rename_to':'wxALIGN_LEFT'},\n 'wxTE_CENTRE': {'rename_to':'wxALIGN_CENTRE_HORIZONTAL'},\n 'wxTE_RIGHT': {'rename_to':'wxALIGN_RIGHT'},\n 'wxTE_DONTWRAP': {'obsolete':True},\n 'wxTE_LINEWRAP': {'obsolete':True},\n 'wxTE_CHARWRAP': {'obsolete':True},\n 'wxTE_WORDWRAP': {'obsolete':True},\n 'wxTE_BESTWRAP': {'obsolete':True},\n 'wxTE_CAPITALIZE': {'obsolete':True}\n },\n 'default_style': 'wxSP_ARROW_KEYS',\n 'style_list': ['wxSP_ARROW_KEYS', 'wxSP_WRAP',\n 'wxTE_PROCESS_ENTER',\n 'wxALIGN_LEFT', 'wxALIGN_CENTRE_HORIZONTAL', 'wxALIGN_RIGHT'],\n 'events': {\n 'EVT_SPINCTRLDOUBLE': { 'type': 'wxSpinEventDouble' },\n 'EVT_TEXT': {},\n 'EVT_TEXT_ENTER': {},\n },\n}\n","sub_path":"widgets/spin_ctrl_double/wconfig.py","file_name":"wconfig.py","file_ext":"py","file_size_in_byte":2770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"290190155","text":"from typing import Dict, Any, Union, Type, List, Optional, Tuple\n\nimport attr\nfrom attr.exceptions import NotAnAttrsClassError\n\ntry:\n import dataclasses\nexcept ModuleNotFoundError:\n dataclasses = None\n\ntry:\n # Python 3.6\n from typing import GenericMeta as GenericType\n\n def _get_origin(t: GenericType):\n return t.__extra__\n\n\nexcept ImportError:\n # Python >=3.7\n from typing import _GenericAlias as GenericType\n\n def _get_origin(t: GenericType):\n return t.__origin__\n\n\n@attr.attrs\nclass Field:\n name: str = attr.attrib()\n field_type: Type = attr.attrib()\n mandatory: bool = attr.attrib()\n validator: Optional[callable] = attr.attrib(default=None)\n converter: Optional[callable] = attr.attrib(default=None)\n\n\ndef resolve_types(\n to_resolve: Dict[Union[Type, str], Any], globals: Dict[str, Any]\n) -> Dict[Type, Any]:\n return {_resolve_type(globals, k): v for k, v in to_resolve.items()}\n\n\ndef get_fields(obj_type: Type) -> List[Field]:\n try:\n return [\n Field(f.name, f.type, f.default == attr.NOTHING, f.validator, f.converter)\n for f in attr.fields(obj_type)\n ]\n except NotAnAttrsClassError:\n try:\n return [\n Field(f.name, f.type, _dataclass_field_mandatory(f))\n for f in dataclasses.fields(obj_type)\n ]\n except (TypeError, AttributeError):\n pass\n raise TypeError(\"can only serialize attrs or dataclass classes\")\n\n\ndef normalize_method(method) -> callable:\n return method.__func__ if isinstance(method, staticmethod) else method\n\n\ndef normalize_type(t: Union[type, GenericType]) -> Tuple[type, tuple]:\n if isinstance(t, GenericType):\n real_type = _get_origin(t)\n generic_args = t.__args__\n elif t is None or isinstance(t, type):\n real_type = t\n generic_args = tuple()\n else:\n raise TypeError(\n f\"Found type annotation {t}, which is not a type and not a generic.\"\n )\n return real_type, generic_args\n\n\ndef is_obj_supported_primitive(obj):\n return (\n 
isinstance(obj, bool)\n or isinstance(obj, int)\n or isinstance(obj, float)\n or isinstance(obj, str)\n or obj is None\n )\n\n\ndef _resolve_type(globals, t):\n return globals.get(t) if isinstance(t, str) else t\n\n\ndef _dataclass_field_mandatory(field):\n return (\n field.default == dataclasses.MISSING\n and field.default_factory == dataclasses.MISSING\n )\n","sub_path":"src/yasoo/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"483910968","text":"class Solution:\n def trap(self, height: List[int]) -> int:\n dpl = [0] * len(height)\n dpr = [0] * len(height)\n res = 0\n \n for i in range(1, len(height)):\n dpl[i] = max(height[i - 1], dpl[i - 1])\n \n for i in range(len(height) - 2, -1, -1):\n dpr[i] = max(height[i + 1], dpr[i + 1])\n \n # print(dpl)\n # print(dpr)\n \n for i in range(len(height)):\n res += max(0, min(dpl[i], dpr[i]) - height[i])\n return res\n\n def trap(self, height: List[int]) -> int:\n if not height: return 0\n l = 0\n r = len(height) - 1\n maxl = height[0]\n maxr = height[-1]\n res = 0\n \n while l < r:\n if height[l] < height[r]:\n res += max(0, maxl - height[l])\n maxl = max(height[l], maxl)\n l += 1\n else:\n res += max(0, maxr - height[r])\n maxr = max(height[r], maxr)\n r -= 1\n #print(l,r, res)\n \n \n return res\n def trap(self, height: List[int]) -> int:\n stack = []\n res = 0\n for i in range(len(height)):\n while stack and height[stack[-1]] <= height[i]:\n h = height[stack.pop()]\n if not stack:\n break\n res += (min(height[stack[-1]], height[i]) - h) * (i - stack[-1] - 1) \n \n stack.append(i)\n return res\n \n\n \n \n \n \n \n ","sub_path":"Python/42_Trapping Rain Water.py","file_name":"42_Trapping Rain Water.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"164326764","text":"'''Reference: https://github.com/alpacahq/alpaca-trade-api-python/tree/master/examples'''\r\n\r\nimport datetime\r\nimport threading\r\nfrom neo_finrl.alpaca.alpaca_engineer import AlpacaEngineer \r\nimport alpaca_trade_api as tradeapi\r\nimport time\r\nimport pandas as pd\r\nimport numpy as np\r\nimport torch\r\n\r\n'''please input your own account info'''\r\nAPI_KEY = \"\"\r\nAPI_SECRET = \"\"\r\nAPCA_API_BASE_URL = 'https://paper-api.alpaca.markets'\r\ndata_url = 'wss://data.alpaca.markets'\r\n\r\n'''load prepared model'''\r\naction_dim = 5\r\nstate_dim = 1+ 1 + 1+ 2*5+ 5*7\r\nfrom elegantrl.agent import AgentPPO\r\nagent = AgentPPO()\r\nnet_dim = 2 ** 7\r\ncwd = './AgentPPO/test-v1'\r\nagent.init(net_dim, state_dim, action_dim)\r\nagent.save_load_model(cwd=cwd, if_save=False)\r\nact = agent.act\r\ndevice = agent.device\r\n\r\n'''paper trading class'''\r\nclass PPO_PaperTrading:\r\n def __init__(self):\r\n self.alpaca = tradeapi.REST(API_KEY,API_SECRET,APCA_API_BASE_URL, 'v2')\r\n stockUniverse = [\r\n 'AAPL', 'AMZN', 'FB', 'GOOG', 'NFLX'\r\n ]\r\n self.stocks = np.asarray([0] * len(stockUniverse))\r\n self.cash = None\r\n self.stocks_df = pd.DataFrame(self.stocks, columns=['stocks'], index = stockUniverse)\r\n self.stockUniverse = stockUniverse\r\n self.price = np.asarray([0] * len(stockUniverse))\r\n self.turb_bool = 0\r\n self.equities = []\r\n \r\n def run(self):\r\n orders = self.alpaca.list_orders(status=\"open\")\r\n for order in orders:\r\n self.alpaca.cancel_order(order.id)\r\n \r\n # Wait for market to open.\r\n print(\"Waiting for market to 
open...\")\r\n tAMO = threading.Thread(target=self.awaitMarketOpen)\r\n tAMO.start()\r\n tAMO.join()\r\n print(\"Market opened.\")\r\n while True:\r\n\r\n # Figure out when the market will close so we can prepare to sell beforehand.\r\n clock = self.alpaca.get_clock()\r\n closingTime = clock.next_close.replace(tzinfo=datetime.timezone.utc).timestamp()\r\n currTime = clock.timestamp.replace(tzinfo=datetime.timezone.utc).timestamp()\r\n self.timeToClose = closingTime - currTime\r\n \r\n if(self.timeToClose < (60 * 15)):\r\n # Close all positions when 15 minutes til market close.\r\n print(\"Market closing soon. Closing positions.\")\r\n \r\n positions = self.alpaca.list_positions()\r\n for position in positions:\r\n if(position.side == 'long'):\r\n orderSide = 'sell'\r\n else:\r\n orderSide = 'buy'\r\n qty = abs(int(float(position.qty)))\r\n respSO = []\r\n tSubmitOrder = threading.Thread(target=self.submitOrder(qty, position.symbol, orderSide, respSO))\r\n tSubmitOrder.start()\r\n tSubmitOrder.join()\r\n \r\n # Run script again after market close for next trading day.\r\n print(\"Sleeping until market close (15 minutes).\")\r\n time.sleep(60 * 15)\r\n else:\r\n # Trade and save equity records\r\n trade = threading.Thread(target=self.trade)\r\n trade.start()\r\n trade.join()\r\n last_equity = float(self.alpaca.get_account().last_equity)\r\n cur_time = time.time()\r\n self.equities.append([cur_time,last_equity])\r\n np.save('./equity.npy', np.asarray(self.equities, dtype = float))\r\n time.sleep(60)\r\n \r\n def awaitMarketOpen(self):\r\n isOpen = self.alpaca.get_clock().is_open\r\n while(not isOpen):\r\n clock = self.alpaca.get_clock()\r\n openingTime = clock.next_open.replace(tzinfo=datetime.timezone.utc).timestamp()\r\n currTime = clock.timestamp.replace(tzinfo=datetime.timezone.utc).timestamp()\r\n timeToOpen = int((openingTime - currTime) / 60)\r\n print(str(timeToOpen) + \" minutes til market open.\")\r\n time.sleep(60)\r\n isOpen = self.alpaca.get_clock().is_open\r\n \r\n def trade(self):\r\n state = self.get_state()\r\n with torch.no_grad():\r\n s_tensor = torch.as_tensor((state,), device=device)\r\n a_tensor = act(s_tensor) \r\n action = a_tensor.detach().cpu().numpy()[0] # not need detach(), because with torch.no_grad() outside\r\n action = (action * 100).astype(int)\r\n if self.turb_bool == 0:\r\n min_action = 10 \r\n for index in np.where(action < -min_action)[0]: # sell:\r\n sell_num_shares = min(self.stocks[index], -action[index])\r\n qty = abs(int(sell_num_shares))\r\n respSO = []\r\n tSubmitOrder = threading.Thread(target=self.submitOrder(qty, self.stockUniverse[index], 'sell', respSO))\r\n tSubmitOrder.start()\r\n tSubmitOrder.join()\r\n self.cash = float(self.alpaca.get_account().cash)\r\n\r\n for index in np.where(action > min_action)[0]: # buy:\r\n if self.cash < 0:\r\n tmp_cash = 0\r\n else:\r\n tmp_cash = self.cash\r\n buy_num_shares = min(tmp_cash // self.price[index], abs(int(action[index])))\r\n qty = abs(int(buy_num_shares))\r\n respSO = []\r\n tSubmitOrder = threading.Thread(target=self.submitOrder(qty, self.stockUniverse[index], 'buy', respSO))\r\n tSubmitOrder.start()\r\n tSubmitOrder.join()\r\n self.cash = float(self.alpaca.get_account().cash)\r\n \r\n else: # sell all when turbulence\r\n positions = self.alpaca.list_positions()\r\n for position in positions:\r\n if(position.side == 'long'):\r\n orderSide = 'sell'\r\n else:\r\n orderSide = 'buy'\r\n qty = abs(int(float(position.qty)))\r\n respSO = []\r\n tSubmitOrder = 
threading.Thread(target=self.submitOrder(qty, position.symbol, orderSide, respSO))\r\n tSubmitOrder.start()\r\n tSubmitOrder.join()\r\n \r\n\r\n def get_state(self):\r\n AE = AlpacaEngineer(api=self.alpaca)\r\n df = self.alpaca.get_barset(self.stockUniverse, '1Min', limit=1000).df\r\n df = AE.add_technical_indicators(df, self.stockUniverse)\r\n time = df.index\r\n first_time = True\r\n for stock in self.stockUniverse:\r\n if first_time:\r\n closes = df[(stock,'close')].values\r\n ary = np.vstack([time,closes]).T\r\n tmp_df = pd.DataFrame(ary, columns = ['date','close'])\r\n tmp_df['tic'] = stock\r\n first_time = False\r\n else:\r\n closes = df[(stock,'close')]\r\n ary = np.vstack([time,closes]).T\r\n tmp_tmp_df = pd.DataFrame(ary, columns = ['date','close'])\r\n tmp_tmp_df['tic'] = stock\r\n tmp_df = tmp_df.append(tmp_tmp_df)\r\n \r\n tmp_df = AE.add_turbulence(tmp_df)\r\n turbulence_ary = tmp_df[tmp_df.tic==self.stockUniverse[0]]['turbulence'].values\r\n turbulence_bool = (turbulence_ary > int(1e4)).astype(np.float32)\r\n turbulence_ary = (turbulence_ary * 2 ** -7).clip((int(1e4)) * 2)\r\n price_array, tech_array = AE.df_to_ary(df, self.stockUniverse)\r\n price = price_array[-1]\r\n self.price = price\r\n tech = tech_array[-1]\r\n turb = turbulence_ary[-1]\r\n turb_bool = turbulence_bool[-1]\r\n self.turb_bool = turb_bool\r\n positions = self.alpaca.list_positions()\r\n stocks = [0] * 5\r\n for position in positions:\r\n ind = self.stockUniverse.index(position.symbol)\r\n stocks[ind] = ( abs(int(float(position.qty))))\r\n self.stocks = stocks\r\n stocks = np.asarray(stocks, dtype = float)\r\n cash = float(self.alpaca.get_account().cash)\r\n self.cash = cash\r\n state = np.hstack((max(cash, 1e4) * (2 ** -17),\r\n price * (2 ** -9),\r\n turb,\r\n turb_bool,\r\n stocks * (2 ** -5),\r\n tech * (2 **-9),\r\n )).astype(np.float32) \r\n return state\r\n \r\n def submitOrder(self, qty, stock, side, resp):\r\n if(qty > 0):\r\n try:\r\n self.alpaca.submit_order(stock, qty, side, \"market\", \"day\")\r\n print(\"Market order of | \" + str(qty) + \" \" + stock + \" \" + side + \" | completed.\")\r\n resp.append(True)\r\n except Exception:\r\n print(\"Order of | \" + str(qty) + \" \" + stock + \" \" + side + \" | did not go through.\")\r\n resp.append(False)\r\n else:\r\n print(\"Quantity is 0, order of | \" + str(qty) + \" \" + stock + \" \" + side + \" | not completed.\")\r\n resp.append(True)\r\n \r\ndrl = PPO_PaperTrading()\r\ndrl.run()\r\n","sub_path":"finrl/neo_finrl/alpaca/alpaca_paper.py","file_name":"alpaca_paper.py","file_ext":"py","file_size_in_byte":8966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"112350556","text":"#!/usr/bin/env python3\nimport pandas as pd\n#import sys\n\ninput_file = 'xls/sales_2013.xlsx'\noutput_file = 'xls/ex02_pandas_output.xls'\n\n# The result is a dictionary data structure.\ndata_frame = pd.read_excel(input_file, sheet_name=None, index_col=None)\n\ncolumns_output = []\nfor worksheet_name, data in data_frame.items():\n # Append all rows for the two selected columns.\n columns_output.append(data.loc[:, ['Customer Name', 'Sale Amount']])\n# Concatenate the collected values\nselected_columns = pd.concat(columns_output, axis=0, ignore_index=True)\n\nwriter = pd.ExcelWriter(output_file)\nselected_columns.to_excel(writer, sheet_name='selected_columns_all_worksheets', 
index=False)\nwriter.save()","sub_path":"FoundationsForAnalyticsWithPython/Week8/ex02_pandas.py","file_name":"ex02_pandas.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"190498353","text":"import numpy as np\nimport pandas as pd\n\ndef tester(op, pro):\n labels_file = open(op.testlabel)\n labels = [int(i) for i in labels_file.readlines()]\n\n \n data = [0] * len(labels)\n features_file = open(op.testdata)\n for line in features_file.readlines():\n list_ = line.split()\n email_num, word_token, word_nums = int(list_[0]), int(list_[1]), int(list_[2])\n\n if data[email_num-1] == 0:\n data[email_num-1] = {'text': [word_token]*word_nums, 'label': labels[email_num-1]}\n else:\n list_ = data[email_num-1]['text']\n list_.extend([word_token]*word_nums)\n data[email_num-1]['text'] = list_\n\n p_w_spam, p_w_ham, p_spam, p_ham = pro['p_w_spam'], pro['p_w_ham'], pro['p_spam'], pro['p_ham']\n word_nums, w_spam_total, w_ham_total = pro['word_nums'], pro['w_spam_total'], pro['w_ham_total']\n predictions = []\n\n for i in range(len(labels)):\n text = data[i]['text']\n p_w_s, p_w_h = 0, 0\n for word in text:\n p_w_ham_ = 1 / (word_nums + w_ham_total) if word not in p_w_ham else p_w_ham[word]\n p_w_spam_ = 1 / (word_nums + w_spam_total) if word not in p_w_spam else p_w_spam[word]\n\n p_w_s += np.log(p_w_spam_)\n p_w_h += np.log(p_w_ham_)\n if p_w_s + np.log(p_spam) > p_w_h + np.log(p_ham):\n predictions.append(1)\n else:\n predictions.append(0)\n print(\" \".join([str(predictions[i]) for i in range(len(predictions))]))\n correct = 0\n recall = 0\n for i in range(len(labels)):\n if predictions[i] == labels[i]:\n correct += 1\n if predictions[i] == 1 and labels[i] == 1:\n recall += 1\n print(\"accuracy: \", correct / len(labels))\n print(\"recall: \", recall / labels.count(1))\n","sub_path":"hw1/spam detection/util/tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"641307767","text":"import cv2\nimport numpy as np\nfaceCascade = cv2.CascadeClassifier('IntelliCat/haarcascade_frontalface_alt.xml')\n\ndef find_faces(image):\n coordinates = locate_faces(image)\n cropped_faces = [image[y:y + h, x:x + w] for (x, y, w, h) in coordinates]\n normalized_faces = [normalize_face(face) for face in cropped_faces]\n return zip(normalized_faces, coordinates)\n\ndef find_faces_emo(image):\n coordinates = locate_faces(image)\n cropped_faces = [image[y:y + h, x:x + w] for (x, y, w, h) in coordinates]\n normalized_faces = [normalize_face_emo(face) for face in cropped_faces]\n return zip(normalized_faces, coordinates)\n\ndef normalize_face_emo(face):\n face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)\n face = cv2.resize(face, (48, 48))\n return face\n\ndef normalize_face(face):\n face = cv2.resize(face, (64, 64),interpolation = cv2.INTER_CUBIC)\n # face = np.array([face])\n face = np.expand_dims(face, axis=0)\n return face\n # im = cv2.resize(face, (64, 64))\n # im.reshape((64, 64))\n # batch = np.expand_dims(im, axis=0)\n # batch = np.expand_dims(batch, axis=3)\n # return batch\n\n\ndef locate_faces(image):\n faces = faceCascade.detectMultiScale(\n image,\n scaleFactor=1.1,\n minNeighbors=15,\n minSize=(70, 70)\n )\n\n return 
faces","sub_path":"demo_webApp/IntelliCat/face_detection.py","file_name":"face_detection.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"268690145","text":"\"\"\"Tests for the processors for django-mailer.\"\"\"\nfrom django.test import TestCase\nfrom django.utils.timezone import now, timedelta\n\nfrom mailer.models import PRIORITY_DEFERRED, PRIORITY_MEDIUM\nfrom mixer.backend.django import mixer\n\nfrom ..constants import SERVER_STATUS\nfrom ..processors.django_mailer import deferred_emails, email_queue\n\n\nclass MailerDeferredEmailsProcessorTestCase(TestCase):\n \"\"\"Test case for the ``deferred_emails`` django-mailer processor.\"\"\"\n longMessage = True\n\n def setUp(self):\n self.normal_messages = mixer.cycle(10).blend(\n 'mailer.Message', priority=PRIORITY_MEDIUM)\n\n def test_deferred_emails(self):\n self.assertEqual(\n deferred_emails()['status'],\n SERVER_STATUS['OK'],\n msg='Without deferred emails, the status should be OK.'\n )\n\n mixer.cycle(1).blend('mailer.Message', priority=PRIORITY_DEFERRED)\n self.assertEqual(\n deferred_emails()['status'],\n SERVER_STATUS['WARNING'],\n msg='With 1 deferred email, the status should be WARNING.'\n )\n\n mixer.cycle(9).blend('mailer.Message', priority=PRIORITY_DEFERRED)\n self.assertEqual(\n deferred_emails()['status'],\n SERVER_STATUS['DANGER'],\n msg='With 10 deferred emails, the status should be DANGER.'\n )\n\n\nclass MailerEmailQueueProcessorTestCase(TestCase):\n \"\"\"Test case for the ``email_queue`` django-mailer processor.\"\"\"\n longMessage = True\n\n def setUp(self):\n self.deferred_messages = mixer.cycle(5).blend(\n 'mailer.Message', priority=PRIORITY_DEFERRED)\n self.recently_added = mixer.cycle(5).blend(\n 'mailer.Message', when_added=now(),\n priority=PRIORITY_MEDIUM)\n\n def test_deferred_emails(self):\n self.assertEqual(\n email_queue()['status'],\n SERVER_STATUS['OK'],\n msg='Without queued emails, the status should be OK.'\n )\n\n mixer.cycle(1).blend('mailer.Message',\n when_added=now() - timedelta(minutes=40),\n priority=PRIORITY_MEDIUM)\n self.assertEqual(\n email_queue()['status'],\n SERVER_STATUS['WARNING'],\n msg='With 1 queued email, the status should be WARNING.'\n )\n\n mixer.cycle(99).blend('mailer.Message',\n when_added=now() - timedelta(minutes=40),\n priority=PRIORITY_MEDIUM)\n self.assertEqual(\n email_queue()['status'],\n SERVER_STATUS['DANGER'],\n msg='With 100 queued emails, the status should be DANGER.'\n )\n","sub_path":"server_guardian_api/tests/processors_tests.py","file_name":"processors_tests.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"288731305","text":"# Copyright 2017 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# -----------------------------------------------------------------------------\n\nfrom sawtooth_sdk.processor.exceptions import InvalidTransaction\n\nfrom 
rbac_addressing import addresser\n\nfrom rbac_processor.common import no_open_proposal\nfrom rbac_processor.role.common import handle_confirm_add\nfrom rbac_processor.role.common import handle_propose_state_set\nfrom rbac_processor.role.common import handle_reject\nfrom rbac_processor.role.common import validate_role_admin_or_owner\nfrom rbac_processor.role.common import validate_role_rel_proposal\n\nfrom rbac_processor.protobuf import proposal_state_pb2\nfrom rbac_processor.protobuf import role_transaction_pb2\n\n\ndef apply_propose(header, payload, state):\n proposal_payload = role_transaction_pb2.ProposeAddRoleMember()\n proposal_payload.ParseFromString(payload.content)\n\n role_members_address = addresser.make_role_members_address(\n role_id=proposal_payload.role_id,\n user_id=proposal_payload.user_id)\n\n proposal_address = addresser.make_proposal_address(\n object_id=proposal_payload.role_id,\n related_id=proposal_payload.user_id)\n\n state_entries = validate_role_rel_proposal(\n header,\n proposal_payload,\n role_members_address,\n state)\n\n if not no_open_proposal(\n state_entries=state_entries,\n object_id=proposal_payload.role_id,\n related_id=proposal_payload.user_id,\n proposal_address=proposal_address,\n proposal_type=proposal_state_pb2.Proposal.ADD_ROLE_MEMBERS):\n raise InvalidTransaction(\n \"There is already an open proposal for ADD_ROLE_MEMBERS \"\n \"with role id {} and user id {}\".format(\n proposal_payload.role_id,\n proposal_payload.user_id))\n\n handle_propose_state_set(\n state_entries=state_entries,\n header=header,\n payload=proposal_payload,\n address=proposal_address,\n proposal_type=proposal_state_pb2.Proposal.ADD_ROLE_MEMBERS,\n state=state)\n\n\ndef apply_propose_remove(header, payload, state):\n proposal_payload = role_transaction_pb2.ProposeRemoveRoleMember()\n proposal_payload.ParseFromString(payload.content)\n\n role_members_address = addresser.make_role_members_address(\n role_id=proposal_payload.role_id,\n user_id=proposal_payload.user_id)\n\n proposal_address = addresser.make_proposal_address(\n object_id=proposal_payload.role_id,\n related_id=proposal_payload.user_id)\n\n state_entries = validate_role_rel_proposal(\n header,\n proposal_payload,\n role_members_address,\n state, True)\n\n if not no_open_proposal(\n state_entries=state_entries,\n object_id=proposal_payload.role_id,\n related_id=proposal_payload.user_id,\n proposal_address=proposal_address,\n proposal_type=proposal_state_pb2.Proposal.REMOVE_ROLE_MEMBERS):\n raise InvalidTransaction(\n \"There is already an open proposal for REMOVE_ROLE_MEMBERS \"\n \"with role id {} and user id {}\".format(\n proposal_payload.role_id,\n proposal_payload.user_id))\n\n handle_propose_state_set(\n state_entries=state_entries,\n header=header,\n payload=proposal_payload,\n address=proposal_address,\n proposal_type=proposal_state_pb2.Proposal.REMOVE_ROLE_MEMBERS,\n state=state)\n\n\ndef apply_confirm(header, payload, state):\n confirm_payload = role_transaction_pb2.ConfirmAddRoleAdmin()\n confirm_payload.ParseFromString(payload.content)\n\n role_members_address = addresser.make_role_members_address(\n role_id=confirm_payload.role_id,\n user_id=confirm_payload.user_id)\n\n txn_signer_owners_address = addresser.make_role_owners_address(\n role_id=confirm_payload.role_id,\n user_id=header.signer_public_key)\n\n state_entries = validate_role_admin_or_owner(\n header=header,\n confirm=confirm_payload,\n txn_signer_rel_address=txn_signer_owners_address,\n state=state)\n\n handle_confirm_add(\n 
state_entries=state_entries,\n header=header,\n confirm=confirm_payload,\n role_rel_address=role_members_address,\n state=state)\n\n\ndef apply_reject(header, payload, state):\n reject_payload = role_transaction_pb2.RejectAddRoleAdmin()\n reject_payload.ParseFromString(payload.content)\n\n txn_signer_owners_address = addresser.make_role_owners_address(\n reject_payload.role_id,\n header.signer_public_key)\n\n state_entries = validate_role_admin_or_owner(\n header=header,\n confirm=reject_payload,\n txn_signer_rel_address=txn_signer_owners_address,\n state=state)\n\n handle_reject(\n state_entries,\n header,\n reject=reject_payload,\n state=state)\n","sub_path":"processor/rbac_processor/role/role_members.py","file_name":"role_members.py","file_ext":"py","file_size_in_byte":5439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"73514009","text":"import pprint\npeople = {}\nfor number in range(1, 4):\n person_name = input(\"What's his name?\")\n person_gender = input(\"What's his gender?\")\n person_occupation = input(\"What's his job?\")\n person_star = input(\"Where is he from?\")\n people[person_name] = { 'Name' : person_name, 'Gender' : person_gender, 'Occupation' : person_occupation, 'Home Planet' : person_star }\n print(people[person_name])\nelse:\n 
pprint.pprint(people)\n \n\n \n","sub_path":"03/profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"312699826","text":"#!/usr/bin/env python\n#\n# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)\n# Copyright (c) 2008-2014 California Institute of Technology.\n# License: 3-clause BSD. The full license text is available at:\n# - http://trac.mystic.cacr.caltech.edu/project/pathos/browser/dill/LICENSE\n\"\"\"\nExtensions to python's 'inspect' module, which can be used\nto retrieve information from live python objects. The primary\ntarget of 'dill.source' is to facilitate access to the source\ncode of interactively defined functions and classes.\n\"\"\"\n\n__all__ = ['getblocks', 'getsource', '_wrap', 'getname',\\\n 'getimportable', 'likely_import', '_namespace']\n\nimport sys\nPYTHON3 = (hex(sys.hexversion) >= '0x30000f0')\n\ndef getblocks(object, lstrip=False):# gettype=False):\n \"\"\"extract code blocks from a code object using stored history\"\"\"\n import readline, inspect #, types\n lbuf = readline.get_current_history_length()\n code = [readline.get_history_item(i)+'\\n' for i in range(1,lbuf)]\n lnum = 0\n codeblocks = []\n #objtypes = []\n try:\n if PYTHON3:\n fname = object.__name__\n ocode = object.__code__\n else:\n fname = object.func_name\n ocode = object.func_code\n cname = ''\n except AttributeError:\n fname = ''\n ocode = lambda :'__this_is_a_big_dummy_object__'\n ocode.co_code = '__this_is_a_big_dummy_co_code__'\n #try: inspect.getmro(object) #XXX: ensure that it's a class\n if hasattr(object, '__name__'): cname = object.__name__ # class\n else: cname = object.__class__.__name__ # instance\n while lnum < len(code):#-1:\n if fname and code[lnum].lstrip().startswith('def '):\n # functions and methods\n block = inspect.getblock(code[lnum:])\n lnum += len(block)\n if block[0].lstrip().startswith('def %s(' % fname):\n if lstrip: block[0] = block[0].lstrip()\n codeblocks.append(block)\n # obtypes.append(types.FunctionType)\n elif cname and code[lnum].lstrip().startswith('class '):\n # classes and instances\n block = inspect.getblock(code[lnum:])\n lnum += len(block)\n _cname = ('class %s(' % cname, 'class %s:' % cname)\n if block[0].lstrip().startswith(_cname):\n if lstrip: block[0] = block[0].lstrip()\n codeblocks.append(block)\n elif fname and 'lambda ' in code[lnum]:\n # lambdas\n block = inspect.getblock(code[lnum:])\n lnum += len(block)\n lhs,rhs = block[0].split('lambda ',1)[-1].split(\":\", 1) #FIXME: bad\n try: #FIXME: unsafe\n _ = eval(\"lambda %s : %s\" % (lhs, rhs), globals(), locals())\n except: _ = lambda : \"__this_is_a_big_dummy_function__\"\n if PYTHON3: _ = _.__code__\n else: _ = _.func_code\n if _.co_code == ocode.co_code:\n if lstrip: block[0] = block[0].lstrip()\n codeblocks.append(block)\n # obtypes.append('')\n #XXX: would be nice to grab constructor for instance, but yikes.\n else:\n lnum +=1\n #if gettype: return codeblocks, objtypes \n return codeblocks #XXX: danger... gets methods and closures w/o containers\n\ndef getsource(object, alias=''):\n \"\"\"Extract source code from python code object.\n\nThis function is designed to work with simple functions, and will not\nwork on any general callable. 
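A typical call is getsource(func, alias='g'), which appends an alias\nassignment for 'g' to the returned source (usage inferred from the alias\nhandling in the body below). 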
However, this function can extract source\ncode from functions that are defined interactively.\n \"\"\"\n import inspect\n _types = ()\n try:\n if PYTHON3:\n ocode = object.__code__\n attr = '__code__'\n else:\n ocode = object.func_code\n attr = 'func_code'\n mname = ocode.co_filename\n except AttributeError:\n try:\n inspect.getmro(object) # ensure it's a class\n mname = inspect.getfile(object)\n except TypeError: # fails b/c class defined in __main__, builtin\n mname = object.__module__\n if mname == '__main__': mname = ''\n except AttributeError: # fails b/c it's not a class\n _types = (\"<type 'instance'>\",)#,\"<type 'module'>\") # editor note: reconstructed; the angle-bracket tokens were stripped from this file\n if not repr(type(object)).startswith(_types): raise\n mname = getattr(object, '__module__', None)\n if mname == '__main__': mname = ''\n attr = '__module__' #XXX: better?\n # no try/except\n if hasattr(object,attr) and mname == '':\n # class/function is typed in at the python shell (instance ok)\n lines = getblocks(object, lstrip=True)[-1]\n else:\n try: # get class/functions from file (instances fail)\n lines = inspect.getsourcelines(object)[0]\n # remove indentation from first line\n lines[0] = lines[0].lstrip()\n except TypeError: # failed to get source, resort to import hooks\n if _types: name = object.__class__.__name__\n else: name = object.__name__\n #module = object.__module__.replace('__builtin__','__builtins__')\n module = object.__module__\n if module in ['__builtin__','__builtins__']:\n lines = [\"%s = %s\\n\" % (name, name)]\n else:\n lines = [\"%s = __import__('%s', fromlist=['%s']).%s\\n\" % (name,module,name,name)]\n if _types: # we now go for the class source\n obj = eval(lines[0].lstrip(name + ' = '))\n lines = inspect.getsourcelines(obj)[0]\n lines[0] = lines[0].lstrip()\n if _types: # instantiate, if there's a nice repr #XXX: BAD IDEA???\n if '(' in repr(object): lines.append('\\n_ = %s\\n' % repr(object))\n else: object.__code__ # raise AttributeError\n if alias:\n if attr != '__module__':\n if lines[0].startswith('def '): # we have a function\n lines.append('\\n%s = %s\\n' % (alias, object.__name__))\n elif 'lambda ' in lines[0]: # we have a lambda\n lines[0] = '%s = %s' % (alias, lines[0])\n else: # ...try to use the object's name\n lines.append('\\n%s = %s\\n' % (alias, object.__name__))\n else: # class or class instance\n if _types: lines.append('%s = _\\n' % alias)\n else: lines.append('\\n%s = %s\\n' % (alias, object.__name__))\n return ''.join(lines)\n\n#exec_ = lambda s, *a: eval(compile(s, '<string>', 'exec'), *a)\n__globals__ = globals()\n__locals__ = locals()\nwrap2 = '''\ndef _wrap(f):\n \"\"\" encapsulate a function and its __import__ \"\"\"\n def func(*args, **kwds):\n try:\n #_ = eval(getsource(f)) #FIXME: safer, but not as robust\n exec getimportable(f, alias='_') in %s, %s\n except:\n raise ImportError('cannot import name ' + f.__name__)\n return _(*args, **kwds)\n func.__name__ = f.__name__\n func.__doc__ = f.__doc__\n return func\n''' % ('__globals__', '__locals__')\nwrap3 = '''\ndef _wrap(f):\n \"\"\" encapsulate a function and its __import__ \"\"\"\n def func(*args, **kwds):\n try:\n #_ = eval(getsource(f)) #FIXME: safer, but not as robust\n exec(getimportable(f, alias='_'), %s, %s)\n except:\n raise ImportError('cannot import name ' + f.__name__)\n return _(*args, **kwds)\n func.__name__ = f.__name__\n func.__doc__ = f.__doc__\n return func\n''' % ('__globals__', '__locals__')\nif PYTHON3:\n exec(wrap3)\nelse:\n exec(wrap2)\ndel wrap2, wrap3\n\ndef getname(obj): #XXX: too simple... 
pull in logic from getimportable, etc ?\n \"\"\" get the name of the object. for lambdas, get the name of the pointer \"\"\"\n if obj.__name__ == '<lambda>':\n return getsource(obj).split('=',1)[0].strip()\n return obj.__name__\n\ndef _namespace(obj):\n \"\"\"_namespace(obj); return namespace hierarchy (as a list of names)\n for the given object.\n\n For example:\n\n >>> from functools import partial\n >>> p = partial(int, base=2)\n >>> _namespace(p)\n [\\'functools\\', \\'partial\\']\n \"\"\"\n # mostly for functions and modules and such\n try: #FIXME: this function needs some work and testing on different types\n from inspect import getmodule, ismodule\n qual = str(getmodule(obj)).split()[1].strip('\"').strip(\"'\")\n qual = qual.split('.')\n if ismodule(obj):\n return qual\n try: # special case: get the name of a lambda\n name = getname(obj)\n except: #XXX: fails to get name\n name = obj.__name__\n return qual + [name] #XXX: can be wrong for some aliased objects\n except: pass\n # special case: numpy.inf and numpy.nan (we don't want them as floats)\n if str(obj) in ['inf','nan','Inf','NaN']: # is more, but are they needed?\n return ['numpy'] + [str(obj)]\n # mostly for classes and class instances and such\n module = getattr(obj.__class__, '__module__', None)\n qual = str(obj.__class__)\n try: qual = qual[qual.index(\"'\")+1:-2]\n except ValueError: pass # str(obj.__class__) made the 'try' unnecessary\n qual = qual.split(\".\")\n if module in ['builtins', '__builtin__']:\n qual = [module] + qual\n return qual\n\ndef _likely_import(first, last, passive=False, explicit=False):\n \"\"\"build a likely import string\"\"\"\n # we don't need to import from builtins, so return ''\n if last in ['NoneType','int','float','long','complex']: return ''#XXX: more\n if not explicit and first in ['builtins','__builtin__']: return ''\n # get likely import string\n if not first: _str = \"import %s\\n\" % last\n else: _str = \"from %s import %s\\n\" % (first, last)\n # FIXME: breaks on most decorators, currying, and such...\n # (could look for magic __wrapped__ or __func__ attr)\n if not passive and not first.startswith('dill.'):# weird behavior for dill\n #print(_str)\n try: exec(_str) #XXX: check if == obj? (name collision)\n except ImportError: #XXX: better top-down or bottom-up recursion?\n _first = first.rsplit(\".\",1)[0] #(or get all, then compare == obj?)\n if not _first: raise\n if _first != first:\n _str = _likely_import(_first, last, passive)\n return _str\n\ndef likely_import(obj, passive=False, explicit=False):\n \"\"\"get the likely import string for the given object\n\n obj: the object to inspect\n passive: if True, then don't try to verify with an attempted import\n explicit: if True, then also include imports for builtins\n \"\"\"\n # for named things... 
with a nice repr #XXX: move into _namespace?\n if not repr(obj).startswith('<'): name = repr(obj).split('(')[0]\n else: name = None\n # get the namespace\n qual = _namespace(obj)\n first = '.'.join(qual[:-1])\n last = qual[-1]\n if name: # try using name instead of last\n try: return _likely_import(first, name, passive)\n except (ImportError,SyntaxError): pass\n try:\n if type(obj) is type(abs): _explicit = explicit # BuiltinFunctionType\n else: _explicit = False\n return _likely_import(first, last, passive, _explicit)\n except (ImportError,SyntaxError):\n raise # could do some checking against obj\n\n\ndef getimportable(obj, alias='', byname=True, explicit=False):\n \"\"\"attempt to get an importable string that captures the state of obj\n\nFor simple objects, this function will discover the name of the object, or the\nrepr of the object, or the source code for the object. To attempt to force\ndiscovery of the source code, use byname=False. The intent is to build a\nstring that can be imported from a python file. Use explicit=True if imports\nfrom builtins need to be included.\n \"\"\"\n #try: # get the module name (to see if it's __main__)\n # module = str(getmodule(obj)).split()[1].strip('\"').strip(\"'\")\n #except: module = ''\n try: _import = likely_import(obj, explicit=explicit)\n except: _import = \"\"\n # try to get the name (or source)...\n if repr(obj).startswith('<'):\n if not byname:\n try: # try to get the source for lambdas and such\n #print(result)\n return getsource(obj, alias=alias)\n except: pass # AttributeError: pass\n try: # get the name (of functions and classes)\n obj = getname(obj)\n except: \n obj = repr(obj)\n #FIXME: what to do about class instances and such?\n # hope that it can be built from the __repr__\n else: obj = repr(obj)\n # we either have __repr__ or __name__\n if obj.startswith('<'):\n raise AttributeError(\"object has no attribute '__name__'\")\n elif alias: result = _import+'%s = %s\\n' % (alias,obj)\n elif _import.endswith('%s\\n' % obj): result = _import\n else: result = _import+'%s\\n' % obj\n #print(result)\n return result\n #XXX: possible failsafe...\n # \"import dill; result = dill.loads(<pickled object>); # repr(<object>)\"\n\n\n# backward compatibility\n_get_name = getname\ngetblocks_from_history = getblocks\n\ndel sys\n\n\n# EOF\n","sub_path":"dill/source.py","file_name":"source.py","file_ext":"py","file_size_in_byte":13095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}\n{"seq_id":"162076037","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n###############################################################################\nimport os,shutil,stat\nimport sqlite3 as sqlite\ntmp_db = '/tmp/xx_new_db.sqlite'\nif os.path.exists(tmp_db):\n os.remove(tmp_db)\nshutil.copy(\"/gdata/test-2.3.sqlite\", tmp_db)\nos.chmod(tmp_db, stat.S_IRUSR + stat.S_IWUSR)\nconn = sqlite.connect(tmp_db)\nconn.enable_load_extension(True)\nconn.execute('SELECT load_extension(\"mod_spatialite.so.7\")')\ncursor = conn.cursor()\n###############################################################################\ncursor.execute('CREATE TABLE MyTable (name TEXT NOT NULL, geom BLOB NOT NULL)')\ncursor.execute(\"INSERT INTO MyTable (name, geom) VALUES ('one', GeomFromText('POINT(1 1)'))\")\ncursor.execute(\"INSERT INTO MyTable (name, geom) VALUES ('two', GeomFromText('POINT(2 2)'))\")\ncursor.execute(\"INSERT INTO MyTable (name, geom) VALUES ('three', GeomFromText('POINT(3 3)'))\")\ncursor.execute(\"SELECT name, AsText(geom) FROM MyTable;\")\nfor rec in 
cursor:\n print(rec)\n###############################################################################\ncursor.execute(\"SELECT pk_uid, name, peoples, AsText(geometry) FROM Towns WHERE pk_uid = 8006\")\nfor rec in cursor:\n print(rec)\ncursor.execute('''UPDATE Towns SET peoples = 150000, name = 'MONZA',\n geometry = GeomFromText('POINT(10 10)', 32632) WHERE pk_uid = 8006''')\ncursor.execute(\"SELECT pk_uid, name, peoples, AsText(geometry) FROM Towns WHERE pk_uid = 8006\")\nfor rec in cursor:\n print(rec)\n###############################################################################\ncursor.execute('BEGIN')\ncursor.execute('CREATE TABLE Villages AS SELECT * FROM Towns WHERE peoples < 500')\nconn.commit()\ncursor.execute('SELECT count(*) FROM Villages')\nfor rec in cursor:\n print(rec)\n###############################################################################\ncursor.execute('BEGIN')\ncursor.execute('CREATE TABLE Metropolis ( Name TEXT NOT NULL, Population INTEGER NOT NULL, Geometry BLOB NOT NULL);')\ncursor.execute('''INSERT INTO Metropolis (Name, Population, Geometry)\n SELECT name, peoples, geometry FROM Towns\n WHERE peoples > 1000000;''')\nconn.commit()\ncursor.execute('SELECT name, population, AsText(geometry) FROM Metropolis')\nfor rec in cursor:\n print(rec)\n###############################################################################\ncursor.execute('DROP TABLE Villages')\ncursor.execute('DROP TABLE Metropolis')\ncursor.execute('VACUUM')\n","sub_path":"pygis_src/ch06_spatialite/sec4_begin_spatialite/test_4_records_x_x.py","file_name":"test_4_records_x_x.py","file_ext":"py","file_size_in_byte":2485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}\n{"seq_id":"80855855","text":"import numpy\nimport random\nfrom sklearn.neighbors import KernelDensity\nimport matplotlib.pyplot as plt\nimport math\n\nimport logging\n\nclass kx_en:\n def __init__(self, n, nkm=10):\n # editor fix: n and nkm were referenced but never passed in;\n # the nkm default of 10 is an assumption, not from the original\n self.n = n\n self.nkm = nkm\n if n < self.nkm:\n self.nkm = n+1\n self.kms = []\n self.fsel = numpy.ones((self.nkm, self.n), dtype=bool)\n indices = []\n for i in range(self.n):\n index = random.randint(0, self.n - 1)\n while index in indices:\n index = random.randint(0, self.n - 1)\n indices.append(index)\n self.fsel[i, index] = False\n # self.wkm = numpy.zeros((self.nkm,), dtype=numpy.float64)\n self.mode = 1 # 0: vote; 1: distance\n\n def fit(self, X, y):\n pout = numpy.zeros((self.nkm, X.shape[0]), dtype=numpy.int64)\n for i in range(0, self.nkm):\n self.kms.append(kx())\n pout[i, :] = self.kms[i].fit(X[:, self.fsel[i, :]], y).reshape(-1)\n\n from scipy import stats\n mode = stats.mode(pout, axis=0)\n return mode[0].reshape(-1)\n\n def fit_labeled(self, xi, yi):\n for i in range(0, self.nkm):\n self.kms[i].fit_labeled(xi[:, self.fsel[i, :]], yi)\n\n def fit_unlabeled(self, xi):\n pucls = numpy.zeros((self.nkm,), dtype=numpy.int64)\n puacpt = numpy.zeros((self.nkm,), dtype=bool)\n pudist = numpy.zeros((self.nkm,), dtype=numpy.float64)\n for i in range(0, self.nkm):\n pucls[i], puacpt[i], pudist[i] = self.kms[i].fit_unlabeled(xi[self.fsel[i, :]])\n\n puclsacpt = pucls[puacpt]\n if self.mode == 0:\n from scipy import stats\n mode = stats.mode(puclsacpt)\n if len(puclsacpt) > 0:\n return mode[0][0], True\n else:\n return -1, False\n else:\n if len(puclsacpt) > 0:\n pudist = pudist[puacpt]\n neari = numpy.argmin(pudist)\n if isinstance(neari, numpy.ndarray):\n return puclsacpt[neari[0]], True\n else:\n return puclsacpt[neari], True\n else:\n return -1, False\n\n\n def predict(self, X):\n pout = 
numpy.zeros((self.nkm, X.shape[0]), dtype=numpy.int64)\n pdist = numpy.zeros((self.nkm, X.shape[0]), dtype=numpy.float64)\n for i in range(0, self.nkm):\n out, dist = self.kms[i].predict(X[:, self.fsel[i, :]])\n pout[i, :], pdist[i, :] = out.reshape(-1), dist.reshape(-1)\n\n if self.mode == 0:\n from scipy import stats\n mode = stats.mode(pout, axis=0)\n return mode[0].reshape(-1)\n else:\n neari = numpy.argmin(pdist, axis=0)\n if isinstance(neari, numpy.ndarray):\n return pout[neari[0], :].reshape(-1)\n else:\n return pout[neari, :].reshape(-1)\n\nclass KDE:\n\n def __init__(self, kernel='gaussian', bandwidth=-1):\n # bins: int, sequence, string(method)\n self.kernel = kernel\n self.bandwidth = bandwidth\n self.nk = 0\n\n def get_kernel_density(self, X):\n # Grid search best bandwidth using Cross-Validation\n if self.bandwidth == -1:\n if X.shape[0] > 10:\n from sklearn.model_selection import GridSearchCV\n grid = GridSearchCV(KernelDensity(),\n {'bandwidth': numpy.linspace(0.1, 1.0, 30)},\n cv=10) # 20-fold cross-validation\n grid.fit(X)\n bandwidth = grid.best_params_['bandwidth']\n else:\n bandwidth = 0.2\n\n \"\"\"Kernel Density Estimation with Scikit-learn\"\"\"\n kde_skl = KernelDensity(bandwidth=bandwidth)\n kde_skl.fit(X)\n pdf = kde_skl\n\n return pdf\n\n def fit(self, X, T):\n T_unique = sorted(numpy.unique(T))\n clusters = []\n classes = []\n logpriors = []\n for t in T_unique:\n t_samples = (T == t)\n pdf = self.get_kernel_density(X[t_samples, :])\n clusters.append(pdf)\n classes.append(t)\n logpriors.append(numpy.log(X[t_samples, :].shape[0] / X.shape[0]))\n self.clusters = clusters\n self.nk = len(clusters)\n self.classes = classes\n self.X = X\n self.T = T\n self.logpriors = logpriors\n\n return self.predict(X)\n\n def predict_proba(self, X):\n logprobs = numpy.vstack([pdf.score_samples(X)\n for pdf in self.clusters]).T\n result = numpy.exp(logprobs + self.logpriors)\n return result / result.sum(1, keepdims=True)\n\n def predict(self, X):\n if len(X.shape) == 1:\n X = X.reshape((1, X.shape[0]))\n m = X.shape[0]\n n = X.shape[1]\n proba = self.predict_proba(X).T\n y = [self.classes[i] for i in numpy.argmax(proba, axis=0)]\n\n return y\n\n def fit_labeled(self, xi, yi):\n xi = xi.reshape((1, xi.shape[0]))\n yi = yi.reshape((1,))\n # inefficient way\n self.X = numpy.concatenate((self.X, xi), axis=0)\n self.T = numpy.concatenate((self.T, yi))\n self.fit(self.X, self.T)\n return self.predict(xi)[0]\n\n def fit_unlabeled(self, xi):\n # inefficient way\n yi = self.predict(xi)[0]\n return self.fit_labeled(xi, yi), True\n\n def fit_unlabeled_batch(self, X):\n return 0\n\n","sub_path":"kx.py","file_name":"kx.py","file_ext":"py","file_size_in_byte":5461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"292440671","text":"import guess_number\nimport random\n\n\ndef guessing_game():\n numbers_list = guess_number.list_of_numbers()\n print(numbers_list)\n number = random.choice(numbers_list)\n print(number)\n name = input('Hello! What is your name? ')\n print(f'Well, {name}, I am thinking of a number'\n ' between 1 and 20.')\n correct_answer = False\n guess_counter = 1\n while correct_answer is False:\n user_guess = int(input('Take a guess. \\n'))\n if user_guess == number:\n correct_answer = True\n print(f'Good job, {name}! 
You guessed my'\n f' number in {guess_counter} guesses!')\n exit()\n elif user_guess < number:\n correct_answer = False\n guess_counter += 1\n print('Your guess is too low.')\n elif user_guess > number:\n correct_answer = False\n guess_counter += 1\n print('Your guess is too high.')\n\n\n# guessing_game()\n","sub_path":"optional/number guessing game/numberguessinggame.py","file_name":"numberguessinggame.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"300968986","text":"import random\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\n\nREGION_ACCOUNT_MAP = {\n \"us-east-1\": \"865070037744\",\n \"us-east-2\": \"057799348421\",\n \"us-west-2\": \"594846645681\",\n \"eu-west-1\": \"985815980388\",\n \"eu-central-1\": \"446921602837\",\n \"ap-northeast-1\": \"977537786026\",\n \"ap-northeast-2\": \"745090734665\",\n \"ap-southeast-2\": \"666831318237\",\n \"ap-southeast-1\": \"192199979996\",\n \"ap-south-1\": \"077584701553\",\n \"ca-central-1\": \"470592106596\",\n \"eu-west-2\": \"856760150666\",\n \"us-west-1\": \"382657785993\",\n \"eu-west-3\": \"843114510376\",\n \"eu-north-1\": \"136758871317\",\n \"sa-east-1\": \"270155090741\",\n \"ap-east-1\": \"822005858737\",\n}\n\nMODEL_PACKAGE_ARN = (\n \"arn:aws:sagemaker:%s:%s:model-package/vitechlab-ppe-model-v3-1-290ec9dbb1ff9b555c3e5e6831fe4769\"\n)\n\ncolors = [\n # Bare Head\n [0.716, 0.170, 0.427],\n\n # Helmet\n [0.388, 0.631, 0.111],\n\n # Welding Mask\n [0.098, 0.680, 0.468],\n\n # Ear Protection\n [0.163, 0.075, 0.997],\n\n # NO Visibility Vest\n [0.849, 0.471, 0.491],\n\n # High Visibility Vest\n [0.595, 0.554, 0.006],\n\n # Person\n [0.699, 0.193, 0.917]\n]\n\nclasses = ['Bare Head', 'Helmet', 'Welding Mask', 'Ear Protection',\n 'NO Visibility Vest', 'High Visibility Vest', 'Person']\n\n\ndef get_model_package_arn(region):\n account = REGION_ACCOUNT_MAP[region]\n \n return MODEL_PACKAGE_ARN % (region, account)\n\n\ndef visualize_detection(img_file, dets, save_path=None):\n img=mpimg.imread(img_file)\n plt.imshow(img)\n height = img.shape[0]\n width = img.shape[1]\n \n for det in dets:\n x0, y0, x1, y1, score, _, klass = det\n \n cls_id = int(klass)\n xmin = x0\n ymin = y0\n xmax = x1\n ymax = y1\n rect = plt.Rectangle((xmin, ymin), xmax - xmin,\n ymax - ymin, fill=False,\n edgecolor=colors[cls_id],\n linewidth=3.5)\n plt.gca().add_patch(rect)\n class_name = str(cls_id)\n if classes and len(classes) > cls_id:\n class_name = classes[cls_id]\n plt.gca().text(xmin, ymin - 2,\n '{:s} {:.3f}'.format(class_name, score),\n bbox=dict(facecolor=colors[cls_id], alpha=0.3),\n fontsize=12, color='white')\n plt.axis('off')\n \n if save_path is not None:\n plt.savefig(save_path, bbox_inches='tight')\n \n plt.show()\n","sub_path":"Construction-PPE-Detector/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"396296053","text":"\n# coding: utf-8\n\n# In[24]:\n\n\nimport os\nimport math\nimport glob\nimport re\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport sklearn\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\nplt.rcParams[\"svg.fonttype\"]=\"none\"\n\n\n# 
In[25]:\n\n\nfolder_path=\"/Users/laurentpottier/Documents/LP/Recherches/Projet_Fondation/Langages&Maths/Anaconda/LPanalyse/PopPlinn/class/PopPlinnTxtSel\"\nfilelist = []\nfor path, dirs, files in os.walk(folder_path):\n for filename in files:\n if 'txt' in filename :\n filelist.append(filename)\n#print (filelist)\nprint(len(filelist))\n\n\n#get classes in string\n#class_path=os.chdir(\"/Users/Vivo-Na/Desktop/class/\")\n#class_path=os.chdir(\"/Users/laurentpottier/Documents/LP/Recherches/Projet_Fondation/Langages&Maths/Anaconda/FromNa1/V9-28juin/class/\")\n\nclass_path=os.chdir(\"/Users/laurentpottier/Documents/LP/Recherches/Projet_Fondation/Langages&Maths/Anaconda/LPanalyse/PopPlinn/class/PopPlinnTxtSel/\")\n\n#filetext=\"03yes_class_reduced.txt\"\n#with open(filetext) as f:\n# classes = f.read().splitlines()\n#print(classes) \n\n\n#filetext=\"03popplinn_class_reduced.txt\"\n#with open(filetext) as f:\n# classes = f.read().splitlines()\n#print(classes) \n\n\n# In[26]:\n\n\ndef f_to_midi (f) :\n return 69+12*math.log(f/440,2)\n\nprint (\"MIDI note of freq 261Hz :\" , f_to_midi (261))\n\ndef midi_to_f (n) :\n return 440*2**((n-69)/12)\n\nprint (\"frequency of note 60 :\" , midi_to_f (60), \"Hz\")\n\n\n# In[27]:\n\n\n\nf_ls = [21.5,32.3,43.1,53.8,64.6,86.1,107.7,140.0,172.3,215.3,269.2,344.5,441.4,549.1,699.8,872.1,1109.0,1388.9,1755.0,2217.9,2788.5,3520.7,4435.8,5587.9,7041.4,8871.7,11175.7,14071.9]\n\nL = [] # list of the row lengths\nfor k in range(27):\n L.append(4+2*k)\n\nf_c = [] # center frequencies of the bands\nfor i in range(len(f_ls)-1):\n f_c.append(math.sqrt(f_ls[i+1]*f_ls[i]))\n \nf_c_midi = []\nfor i in range(len(f_c)):\n f_c_midi.append(f_to_midi(f_c[i]))\n\nf_c_moy = 0 \nfor i in range(len(f_c)):\n f_c_moy += f_c[i]\nf_c_moy /= 27 # editor fix: the division was indented inside the loop, which mangled the mean\n \nf_c_gmoy = 0\nfor i in range(len(f_c)):\n f_c_gmoy += math.log(f_c[i], 2)\nf_c_gmoyR = 2**(f_c_gmoy/27) # editor fix: moved out of the loop; only the final value is needed\n\n# equivalent formula\n#f_c_gmoy2 = 0\n#for i in range(len(f_c)):\n# f_c_gmoy2 += f_to_midi(f_c[i])\n# f_c_gmoyR2 = midi_to_f(f_c_gmoy2/27)\n\n \nprint(\"len(f_ls) :\", len(f_ls), \"bounds\")\nW=[]\nfor i in range(len(f_ls)-1):\n W.append(round(f_ls[i+1]-f_ls[i], 2))\nprint(\"w :\" , W) \n \nprint (\"f_c :\", f_c)\nprint (\"f_c_moy :\", round(f_c_moy,1), \"Hz\") # (in Hz) mean of the band-center frequencies \nprint (\"f_c_gmoyR :\", round(f_c_gmoyR,1), \"Hz\") # (in Hz) mean of the centers computed from the MIDI notes\n#print (\"f_c_gmoyR :\", f_c_gmoyR2) # (in Hz) mean of the centers computed from the MIDI notes\n\nf_cA = np.asarray(f_c)\nf_cA = f_cA[:, np.newaxis]\n#print (round(3.149 , 2))\n\n\n# In[28]:\n\n\n# read filetxt and generate array S\ndef read(filetext):\n with open(filetext) as f:\n mylist = f.read().splitlines()\n for x in range(8):\n mylist.pop(0)\n S=[]\n for element in reversed(mylist):\n element2=[float(i) for i in element.split()]\n S.append(element2)\n return S\n \n\n\n# In[29]:\n\n\nfile1 = \"PopPlinn_sr44100_deb00_00_00_t02_00_pas02_00.txt\"\nS1 = read(file1)\nfile2 = \"PopPlinn_sr44100_deb01_00_00_t02_00_pas02_00.txt\"\nS2 = read(file2)\n\n\n# In[30]:\n\n\n# parse the file name time fields into seconds \ndef timetxt (filetext):\n str_L = filetext.rsplit(sep='_')\n test = 0\n result = 0\n for str in str_L:\n if test == 3:\n cent = int(str)\n result+=(cent/100)\n test = 0\n if test == 2:\n sec = int(str)\n test = 3\n result+=sec\n if re.search('deb', str):\n test = 1\n min = int(str[3:5]) # editor fix: was str[4-5] (i.e. str[-1]); two minute digits after 'deb' are assumed\n result+=(min*60)\n test = 2\n return result\n\nprint(timetxt(file2))\n\n\n# In[31]:\n\n\ndef ampmax(S, k):\n ampmx = 0\n for j 
in range(4+2*k):\n ampmx = max(ampmx, S[k][j])\n return ampmx\n\ndef ampmin(S, k):\n ampmn = 1\n for j in range(4+2*k):\n ampmn = min(ampmn, S[k][j])\n return ampmn\n\ndef deltaamp(S, k):\n return ampmax(S, k) - ampmin(S, k)\n\ndef moy_ecart_amps(S):\n ecarts = []\n for k in range(26):\n ecarts.append(deltaamp(S, k))\n ecartsA = np.asarray(ecarts)\n return ecartsA.mean()\n \n\n#for k in range(27):\n# print(\"k =\",1+k ,\"amp min=\", round(ampmin(S1, k),2),\"amp max=\", round(ampmax(S1, k),2), \"spread\" ,round(deltaamp(S1, k), 2))\nfor k in range(27):\n print(\"k =\",1+k ,\"amp min=\", round(ampmin(S2, k),2),\"amp max=\", round(ampmax(S2, k),2), \"spread\" ,round(deltaamp(S2, k), 2))\n \nprint(\"mean of the amplitude spreads :\",round(moy_ecart_amps (S1),3))\nprint(\"mean of the amplitude spreads :\",round(moy_ecart_amps (S2),3))\n\n\n# In[64]:\n\n\ndef array_eucl_dist(Arr1, Arr2):\n res = np.subtract(Arr1, Arr2)\n res = np.multiply(res, res)\n res = np.sum(res)\n return res**0.5\n\ndef selfsimil (S, k):\n dist = 0\n distmin = 1\n distmax = 0\n for j in range(3+2*k):\n d = (array_eucl_dist(S[k], np.roll(S[k],j+1))/(1+k))\n dist += d\n distmin = min(distmin, d)\n distmax = max(distmax, d)\n print(d)\n return [dist, distmin, distmax]\n\nnp.round_(selfsimil (S2, 15), 4)\n\n\n# TODO: look at the zero crossings of the derivative of d = number of cycles\n# spreads, peaks\n\n","sub_path":"V3D/DescripteursV3d-Rythm.py","file_name":"DescripteursV3d-Rythm.py","file_ext":"py","file_size_in_byte":5408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}\n{"seq_id":"42662175","text":"import numpy as np\nimport scipy.linalg as la\nimport matplotlib.pyplot as plt\nfrom scipy.misc import derivative\n\n#Problem 1\ndef newtonsMethod(f, x0, tol=1e-7, df=None):\n if df is None:\n df = lambda x: derivative(f, x, dx=1e-5)\n \n x = x0\n while(np.absolute(float(f(x))/df(x)) >= tol): # editor fix: was sp.absolute, but only numpy is imported (as np)\n x -= float(f(x))/df(x)\n return x\n\n#Problem 2\ndef Problem2():\n def f(x):\n return x**(1./3)\n \n return f(np.random.rand(100))\n\n#Problem 3\ndef Problem3(npts=800):\n f = lambda x: x**3 - 2*x + 1./2\n df = lambda x: 3*x**2 - 2.\n \n x = np.random.uniform(-2, 2, size=(npts,))\n \n r = [newtonsMethod(f, x0, df=df) for x0 in x]\n y = f(x)\n \n plt.plot(x, r, '.', x, y, '.')\n plt.show()\n \n#Problem 4\ndef newtonsMatrix(f, x, J=None, tol=1e-7):\n\n# try:\n# ndim = len(f(*inputs)), len(inputs)\n# except TypeError:\n# ndim = 1, len(inputs)\n# \n# jacobian = sp.zeros(ndim)\n# \n# for j in xrange(ndim[1]):\n# jacobian[:,j] = cdiff(func, inputs, vary=[j], accur=4\n# use scipy derivative with 1e-5\n \n\tdef jacobian(f, x):\n\t\n\t \n\t\tdef replace( A,a,i):\n\t\t\tR=A.copy() #This line caused me a lot of problems\n\t\t\tR[i]=a\n\t\t\treturn R\n\t\t\t\n\t\tJ = np.zeros((len(x),len(x)))\n\t\tfor i in range(len(x)):\n\t\t\tfor j in range(len(x)):\n\t\t\t\t#Is there a better way to do a partial derivative?\n\t\t\t\tJ[i,j] = derivative(lambda a: f(replace(x,a,i))[j],x[i]) # editor fix: was F, which is undefined\n\t\treturn J\n\t\t\n\tif J is None:\n\t J = lambda x: jacobian(f, x)\n\t\t\n\tinc = la.solve(J(x), f(x))\n\twhile(np.absolute(inc).max() >= tol):\n\t\tx -= inc\n\t\tinc = la.solve(J(x), f(x))\n\treturn x\t\t\n\t\t\n\t\t\n\n","sub_path":"Algorithms/NewtonsMethod/newton.py","file_name":"newton.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}\n{"seq_id":"119644142","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom tools import get_array, dags, 
names\n\ndata = dags[0]\n\nfind_or_add_index = get_array(data, \"find_or_add index\")\nfind_or_add_time = get_array(data, \"find_or_add add time\")\nfind_or_add_add = get_array(data, \"find_or_add is add\")\nfind_or_add_level = get_array(data, \"find_or_add level\")\n\nkwargs = {\"linestyle\": \"None\", \"marker\": \"o\", \"markersize\": 3}\n\nplt.xlabel(\"index\")\nplt.ylabel(\"time\")\n\nindices_add = find_or_add_add == 1\nindices_noadd = find_or_add_add == 0\n\nprint(\"Num add: \", np.sum(indices_add))\nprint(\"Num no add: \", np.sum(indices_noadd))\n\nplt.title(names[0])\n\nassert np.sum(indices_add) + np.sum(indices_noadd) == len(find_or_add_index)\n\nplot_index = 0\nfor level in range(32):\n level_indices = level == find_or_add_level\n if np.sum(level_indices) == 0:\n continue\n\n plot_index += 1\n plt.subplot(2, 3, plot_index)\n plt.title(\"level = \" + str(level))\n plt.plot(find_or_add_index[np.logical_and(indices_add, level_indices)],\n find_or_add_time[np.logical_and(indices_add, level_indices)], color=\"red\", **kwargs)\nplt.show()\n","sub_path":"python/plot_find_add_add_position_vs_time.py","file_name":"plot_find_add_add_position_vs_time.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}\n{"seq_id":"89499147","text":"#!/usr/bin/python\n\nimport _thread\nimport time\nimport random\nimport socket\n\n# UDP server(drone status thread)\nSTATUS_SERVER_ADDR = \"0.0.0.0\"\nSTATUS_SERVER_PORT = 8890\n\nDRONE_COMMAND_ADDR = (\"192.168.10.1\", 8889)\nRETRY_DELAY = 1\n\nBUFFER_SIZE = 1024\n\nSTATUS_DISPLAY_INTERVAL = 1\nMAX_RETRY = 3\n\ndroneStatus = {\n \"mid\" : -100,\n \"x\" : -100,\n \"y\" : -100,\n \"z\" : -100,\n \"h\" : -100,\n \"baro\" : -100,\n \"time\" : -100,\n \"agx\" : -100,\n \"agy\" : -100,\n \"agz\" : -100,\n \"pitch\" : -100,\n \"roll\" : -100,\n \"yaw\" : -100,\n \"vgx\" : -100,\n \"vgy\" : -100,\n \"vgz\" : -100,\n \"templ\" : -100,\n \"temph\" : -100,\n \"tof\" : -100,\n \"bat\" : -100\n}\n\n\n# Define a function for the thread\ndef statusServer():\n global droneStatus\n server = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)\n server.bind((STATUS_SERVER_ADDR, STATUS_SERVER_PORT)) \n print(\"Status server is ready\")\n\n while(True):\n try:\n recvData = server.recvfrom(BUFFER_SIZE)[0].decode(\"utf-8\")\n # break down data into object\n tmpParams = recvData.split(\";\")\n for param in tmpParams:\n pairVal = param.split(\":\")\n if pairVal[0] != \"mpry\" and len(pairVal) > 1:\n droneStatus[pairVal[0]] = float(pairVal[1])\n except Exception as e:\n print(\"analyzer: \" + str(e)) # editor fix: concatenating the exception object itself raises TypeError\n\ndef statusDebug():\n while True:\n time.sleep(STATUS_DISPLAY_INTERVAL)\n print(\"\\t\\t\\t\" + str(droneStatus))\n\ndef sendCommand(cmd,udpSocket,wait = True):\n COMMAND = str.encode(cmd)\n try:\n sendOK = False\n print(\"# send \" + cmd + \" ...\")\n udpSocket.sendto(COMMAND, DRONE_COMMAND_ADDR)\n\n if not wait:\n return\n\n recvMsg = udpSocket.recvfrom(BUFFER_SIZE)[0].decode(\"utf-8\") \n \n while not sendOK:\n if recvMsg == \"ok\":\n print(\">> OK\")\n break\n elif recvMsg == \"error\":\n print(\">> Error\")\n break\n elif recvMsg != \"\": \n print(\">> \" + recvMsg)\n break\n else:\n recvMsg = udpSocket.recvfrom(BUFFER_SIZE)[0].decode(\"utf-8\") \n time.sleep(RETRY_DELAY)\n except Exception as e:\n print(e)\n\ndef udpClient():\n global droneStatus\n\n COMMAND = str.encode(\"command\")\n TAKEOFF = str.encode(\"takeoff\")\n ENABLE_MPAD = str.encode(\"mon\")\n\n UDPClientSocket = 
socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)\n\n \n\n\n\n \"\"\" ### COMMANDS LIST ###\n mdirection x=0/1/2 [downward,forward,both]\n rc a b c d [LR,FB,UD,yaw] +-100\n takeoff\n stop\n land\n \"\"\"\n\n # Control commands start here\n # enable dev mode\n sendCommand(\"command\",UDPClientSocket)\n # enable mission pad\n sendCommand(\"mon\",UDPClientSocket)\n sendCommand(\"mdirection 0\",UDPClientSocket)\n # take off\n sendCommand(\"takeoff\",UDPClientSocket)\n #time.sleep(COMMAND_DELAY)\n\n if droneStatus[\"mid\"] != 1:\n # move left\n sendCommand(\"left 50\",UDPClientSocket)\n if droneStatus[\"mid\"] != 1:\n # move right\n sendCommand(\"right 100\",UDPClientSocket)\n if droneStatus[\"mid\"] != 1:\n sendCommand(\"left 50\",UDPClientSocket)\n if droneStatus[\"mid\"] != 1:\n sendCommand(\"forward 50\",UDPClientSocket) # editor fix: was \"foward\", which the Tello SDK rejects\n if droneStatus[\"mid\"] != 1:\n sendCommand(\"back 100\",UDPClientSocket)\n\n if droneStatus[\"mid\"] != 1:\n print(\"cannot find mpad 1\")\n else:\n print(\"found mpad 1\")\n sendCommand(\"rc 0 20 0 0\",UDPClientSocket,False)\n print(\"Waiting for pad2\")\n while droneStatus[\"mid\"] != 2:\n pass\n print(\"Found mpad2\")\n # turn off rc\n sendCommand(\"rc 0 0 0 0\",UDPClientSocket,False)\n sendCommand(\"land\",UDPClientSocket)\n\n \n \n\ndef main():\n try:\n # get and update drone status obj\n _thread.start_new_thread( statusServer,())\n # msg sender thread\n _thread.start_new_thread( udpClient,())\n # print out the current drone status\n _thread.start_new_thread( statusDebug,())\n except:\n print(\"Error: unable to start thread\")\n\n while 1:\n pass\n\nif __name__ == \"__main__\":\n main()","sub_path":"control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":4266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}\n{"seq_id":"316443448","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n/***************************************************************************\n CSVTools\n A QGIS plugin\n Adds new processing algorithms and models that deal with CSV files\n Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/\n -------------------\n begin : 2019-02-19\n copyright : (C) 2019 by Yann Voté\n email : ygversil@lilo.org\n ***************************************************************************/\n\n/***************************************************************************\n * *\n * This program is free software; you can redistribute it and/or modify *\n * it under the terms of the GNU General Public License as published by *\n * the Free Software Foundation; either version 2 of the License, or *\n * (at your option) any later version. 
*\n * *\n ***************************************************************************/\n\"\"\"\n\n__author__ = 'Yann Voté'\n__date__ = '2019-02-19'\n__copyright__ = '(C) 2019 by Yann Voté'\n\n# This will get replaced with a git SHA1 when you do a git archive\n\n__revision__ = '$Format:%H$'\n\nimport pathlib\nimport urllib\n\nfrom PyQt5.QtGui import QIcon\nfrom processing.algs.qgis.QgisAlgorithm import QgisAlgorithm\nfrom qgis.core import (\n Qgis,\n QgsFeatureSink,\n QgsMessageLog,\n QgsProcessing,\n QgsProcessingException,\n QgsProcessingParameterBoolean,\n QgsProcessingParameterCrs,\n QgsProcessingParameterEnum,\n QgsProcessingParameterFeatureSink,\n QgsProcessingParameterFile,\n QgsProcessingParameterString,\n QgsVectorLayer,\n)\n\n\nclass LoadCSVAlgorithm(QgisAlgorithm):\n \"\"\"QGIS algorithm that takes a CSV file and loads it as a vector layer.\"\"\"\n\n # Constants used to refer to parameters and outputs. They will be\n # used when calling the algorithm from another algorithm, or when\n # calling from the QGIS console.\n INPUT = 'INPUT'\n OUTPUT = 'OUTPUT'\n DELIMITER = 'DELIMITER'\n QUOTECHAR = 'QUOTE_CHAR'\n USE_HEADER = 'USE_HEADER'\n DECIMAL_POINT = 'DECIMAL_POINT'\n GEOMETRY_DATA = 'GEOMETRY_DATA'\n WKT_FIELD = 'WKT_FIELD'\n X_FIELD = 'X_FIELD'\n Y_FIELD = 'Y_FIELD'\n CRS = 'CRS'\n\n def initAlgorithm(self, config):\n \"\"\"Initialize algorithm with inputs and output parameters.\"\"\"\n self.addParameter(QgsProcessingParameterFile(\n self.INPUT,\n self.tr('Input CSV file'),\n extension='csv',\n ))\n self.delimiters = [',', ';', '|', 't']\n self.addParameter(QgsProcessingParameterEnum(\n self.DELIMITER,\n self.tr('Column delimiter'),\n options=self.delimiters,\n defaultValue=0,\n ))\n self.addParameter(QgsProcessingParameterString(\n self.QUOTECHAR,\n self.tr('Character used to quote columns'),\n defaultValue='\"',\n ))\n self.addParameter(QgsProcessingParameterBoolean(\n self.USE_HEADER,\n self.tr('Does the first line contain headers?'),\n defaultValue=True,\n ))\n self.decimal_points = ['.', ',']\n self.addParameter(QgsProcessingParameterEnum(\n self.DECIMAL_POINT,\n self.tr('Decimal point'),\n options=self.decimal_points,\n defaultValue=0,\n ))\n self.geometry_data = [\n self.tr('WKT column'),\n self.tr('X/Y (or longitude/latitude) columns'),\n self.tr('No Geometry'),\n ]\n self.addParameter(QgsProcessingParameterEnum(\n self.GEOMETRY_DATA,\n self.tr('How is the geometry given?'),\n options=self.geometry_data,\n defaultValue=2,\n ))\n self.addParameter(QgsProcessingParameterString(\n self.WKT_FIELD,\n self.tr('Geometry column, as WKT (if WKT column selected)'),\n optional=True,\n ))\n self.addParameter(QgsProcessingParameterString(\n self.X_FIELD,\n self.tr('X/longitude column (if X/Y column selected)'),\n optional=True,\n ))\n self.addParameter(QgsProcessingParameterString(\n self.Y_FIELD,\n self.tr('Y/latitude column (if X/Y column selected)'),\n optional=True,\n ))\n self.addParameter(QgsProcessingParameterCrs(\n self.CRS,\n self.tr('CRS (if geometry given)'),\n optional=True,\n ))\n self.addParameter(QgsProcessingParameterFeatureSink(\n self.OUTPUT,\n self.tr('CSV layer'),\n QgsProcessing.TypeVector\n ))\n\n def name(self):\n \"\"\"Algorithm identifier.\"\"\"\n return 'loadcsvfile'\n\n def displayName(self):\n \"\"\"Algorithm human name.\"\"\"\n return self.tr('Create vector layer from CSV file')\n\n def groupId(self):\n \"\"\"Algorithm group identifier.\"\"\"\n return 'importfromcsv'\n\n def group(self): # Cannot be factored in abstract class because of i18n\n \"\"\"Algorithm group 
human name.\"\"\"\n return self.tr('Import from CSV')\n\n def shortHelpString(self):\n \"\"\"Algorithm help message displayed in the right panel.\"\"\"\n return self.tr(\n \"This algorithm loads a CSV file as a vector layer, with or \"\n \"without geometry. If present, geometry may be given as one WKT \"\n \"column or as two X/Y columns.\"\n )\n\n def icon(self):\n \"\"\"Algorithm's icon.\"\"\"\n return QIcon(':/plugins/csv_tools/load_csv.png')\n\n def processAlgorithm(self, parameters, context, feedback):\n \"\"\"Actual processing steps.\"\"\"\n uri = self._buildUri(parameters, context)\n vlayer = QgsVectorLayer(uri, \"layername\", \"delimitedtext\")\n if not vlayer.isValid():\n QgsMessageLog.logMessage(\n 'CSV Tools: Cannot add layer with URI {}'.format(\n vlayer.dataProvider().dataSourceUri()\n ),\n 'Processing',\n Qgis.Critical\n )\n QgsMessageLog.logMessage(\n 'CSV Tools: {}'.format(\n vlayer.dataProvider().error().message()\n ),\n 'Processing',\n Qgis.Critical\n )\n raise QgsProcessingException(\n '{}: {}'.format(\n vlayer.dataProvider().dataSourceUri(),\n vlayer.dataProvider().error().message()\n )\n )\n # We consider that having CSV data loaded is half the way\n feedback.setProgress(50)\n (sink, dest_id) = self.parameterAsSink(parameters, self.OUTPUT,\n context, vlayer.fields(),\n vlayer.wkbType(), vlayer.crs())\n if sink is None:\n raise QgsProcessingException(\n self.invalidSinkError(parameters, self.OUTPUT)\n )\n count = vlayer.featureCount()\n total = 100.0 / count if count else 0\n features = vlayer.getFeatures()\n for i, feature in enumerate(features):\n if feedback.isCanceled():\n break\n sink.addFeature(feature, QgsFeatureSink.FastInsert)\n # Update the progress bar\n feedback.setProgress(50 + int(i * total))\n return {self.OUTPUT: dest_id}\n\n def _buildUri(self, parameters, context):\n \"\"\"Build URI to pass to ``qgis.core.QgsVectorLayer`` from params.\"\"\"\n csv_path = self.parameterAsFile(parameters, self.INPUT, context)\n delimiter = self.parameterAsEnum(parameters, self.DELIMITER, context)\n delimiter = self.delimiters[delimiter]\n quotechar = self.parameterAsString(parameters, self.QUOTECHAR, context)\n use_header = self.parameterAsBool(parameters, self.USE_HEADER,\n context)\n decimal_point = self.parameterAsEnum(parameters, self.DECIMAL_POINT,\n context)\n decimal_point = self.decimal_points[decimal_point]\n geometry_data = self.parameterAsEnum(parameters, self.GEOMETRY_DATA,\n context)\n wkt_field = self.parameterAsString(parameters, self.WKT_FIELD, context)\n x_field = self.parameterAsString(parameters, self.X_FIELD, context)\n y_field = self.parameterAsString(parameters, self.Y_FIELD, context)\n crs = self.parameterAsCrs(parameters, self.CRS, context)\n base_uri = pathlib.Path(csv_path).as_uri()\n params = (\n ('type', 'csv'),\n ('useHeader', 'Yes' if use_header else 'No'),\n ('decimalPoint', decimal_point),\n ('trimFields', 'Yes'),\n ('detectTypes', 'yes'),\n ('subsetIndex', 'no'),\n ('watchFile', 'no'),\n )\n if delimiter != ',':\n params += (('delimiter', delimiter),)\n if quotechar != '\"':\n params += (('quote', quotechar),)\n if geometry_data == 0:\n params += (\n ('wktField', wkt_field),\n ('crs', crs.authid()),\n ('spatialIndex', 'yes'),\n )\n return '{base_uri}?{params}'.format(\n base_uri=base_uri,\n params=urllib.parse.urlencode(params, safe=r'\\:')\n )\n elif geometry_data == 1:\n params += (\n ('xField', x_field),\n ('yField', y_field),\n ('crs', crs.authid()),\n ('spatialIndex', 'yes'),\n )\n return '{base_uri}?{params}'.format(\n 
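# illustrative final URI for the WKT branch (file name assumed): file:///tmp/pts.csv?type=csv&useHeader=Yes&wktField=geom&crs=EPSG:4326 ; the exact encoding is produced by urllib.parse.urlencode below\n 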
base_uri=base_uri,\n params=urllib.parse.urlencode(params, safe=r'\\:')\n )\n else:\n params += (\n ('geomType', 'none'),\n ('spatialIndex', 'no'),\n )\n return '{base_uri}?{params}'.format(\n base_uri=base_uri,\n params=urllib.parse.urlencode(params, safe=r'\\:')\n )\n","sub_path":"import_from_csv_algorithms.py","file_name":"import_from_csv_algorithms.py","file_ext":"py","file_size_in_byte":10302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"100738742","text":"import tensorflow as tf\nimport numpy as np\nimport sys, os,cv2\nfrom sklearn.utils import shuffle\nfrom scipy.misc import imread,imresize\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import OneHotEncoder\nfrom skimage.transform import resize\nfrom imgaug import augmenters as iaa\nimport imgaug as ia\nfrom skimage.color import rgba2rgb\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nplt.style.use('seaborn-white')\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' \nnp.random.seed(6278)\ntf.set_random_seed(6728)\nia.seed(6278)\n\ndef tf_elu(x): return tf.nn.elu(x)\ndef d_tf_elu(x): return tf.cast(tf.greater(x,0),tf.float32) + (tf_elu(tf.cast(tf.less_equal(x,0),tf.float32) * x) + 1.0)\ndef tf_softmax(x): return tf.nn.softmax(x)\ndef unpickle(file):\n import pickle\n with open(file, 'rb') as fo:\n dict = pickle.load(fo, encoding='bytes')\n return dict\n\n# code from: https://github.com/tensorflow/tensorflow/issues/8246\ndef tf_repeat(tensor, repeats):\n \"\"\"\n Args:\n\n input: A Tensor. 1-D or higher.\n repeats: A list. Number of repeat for each dimension, length must be the same as the number of dimensions in input\n\n Returns:\n \n A Tensor. Has the same type as input. Has the shape of tensor.shape * repeats\n \"\"\"\n expanded_tensor = tf.expand_dims(tensor, -1)\n multiples = [1] + repeats\n tiled_tensor = tf.tile(expanded_tensor, multiples = multiples)\n repeated_tesnor = tf.reshape(tiled_tensor, tf.shape(tensor) * repeats)\n return repeated_tesnor\n\n# ================= VIZ =================\ndef show_hist_of_weigt(all_weight_list,status='before'):\n fig = plt.figure()\n weight_index = 0\n\n for i in range(1,1+int(len(all_weight_list)//3)):\n ax = fig.add_subplot(1,4,i)\n ax.grid(False)\n temp_weight_list = all_weight_list[weight_index:weight_index+3]\n for temp_index in range(len(temp_weight_list)):\n current_flat = temp_weight_list[temp_index].flatten()\n ax.hist(current_flat,histtype='step',bins='auto',label=str(temp_index+weight_index))\n ax.legend()\n ax.set_title('From Layer : '+str(weight_index+1)+' to '+str(weight_index+3))\n weight_index = weight_index + 3\n plt.savefig('viz/weights_'+str(status)+\"_training.png\")\n plt.close('all')\n\n# Def: Simple function to show 9 image with different channels\ndef show_9_images(image,layer_num,image_num,channel_increase=3,alpha=None,gt=None,predict=None):\n image = (image-image.min())/(image.max()-image.min())\n fig = plt.figure()\n color_channel = 0\n limit = 10\n if alpha: limit = len(gt)\n for i in range(1,limit):\n ax = fig.add_subplot(3,3,i)\n ax.grid(False)\n ax.set_xticks([])\n ax.set_yticks([])\n if alpha:\n ax.set_title(\"GT: \"+str(gt[i-1])+\" Predict: \"+str(predict[i-1]))\n else:\n ax.set_title(\"Channel : \" + str(color_channel) + \" : \" + str(color_channel+channel_increase-1))\n ax.imshow(np.squeeze(image[:,:,color_channel:color_channel+channel_increase]))\n color_channel = color_channel + channel_increase\n \n if alpha:\n plt.savefig('viz/z_'+str(alpha) + \"_alpha_image.png\")\n 
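# alpha mode (above) writes one comparison figure per alpha value under viz/;\n # the per-layer branch below names files by layer and image number instead.\n 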
else:\n plt.savefig('viz/'+str(layer_num) + \"_layer_\"+str(image_num)+\"_image.png\")\n plt.close('all')\n# ================= VIZ =================\n\n# ================= DATA AUGMENTATION =================\nseq = iaa.Sequential([\n iaa.Sometimes(0.5,\n iaa.Fliplr(0.5), # horizontal flips\n ),\n # iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5),\n # Apply affine transformations to each image.\n # Scale/zoom them, translate/move them, rotate them and shear them.\n iaa.Sometimes(0.5,\n iaa.Affine(\n rotate=(-180, 180),\n ))\n], random_order=True) # apply augmenters in random order\n# ================= DATA AUGMENTATION =================\n\n# ================= LAYER CLASSES =================\nclass CNN():\n \n def __init__(self,k,inc,out):\n self.w = tf.Variable(tf.truncated_normal([k,k,inc,out],stddev=0.05))\n # self.w = tf.Variable(xavier_init_cnn(k,inc,out))\n self.m,self.v_prev = tf.Variable(tf.zeros_like(self.w)),tf.Variable(tf.zeros_like(self.w))\n\n def getw(self): return self.w\n\n def feedforward(self,input,stride=1,padding='SAME'):\n self.input = input\n self.layer = tf.nn.conv2d(input,self.w,strides=[1,stride,stride,1],padding=padding) \n self.layerA = tf_elu(self.layer)\n return self.layerA \n\n def backprop(self,gradient,learning_rate_change,stride=1,padding='SAME'):\n grad_part_1 = gradient \n grad_part_2 = d_tf_elu(self.layer) \n grad_part_3 = self.input\n\n grad_middle = grad_part_1 * grad_part_2\n\n grad = tf.nn.conv2d_backprop_filter(input = grad_part_3,filter_sizes = self.w.shape,out_backprop = grad_middle,\n strides=[1,stride,stride,1],padding=padding\n )\n\n grad_pass = tf.nn.conv2d_backprop_input(input_sizes = [batch_size] + list(grad_part_3.shape[1:]),filter= self.w,out_backprop = grad_middle,\n strides=[1,stride,stride,1],padding=padding\n )\n\n update_w = []\n update_w.append(tf.assign( self.m,self.m*beta1 + (1-beta1) * (grad) ))\n update_w.append(tf.assign( self.v_prev,self.v_prev*beta2 + (1-beta2) * (grad ** 2) ))\n m_hat = self.m / (1-beta1)\n v_hat = self.v_prev / (1-beta2)\n adam_middel = learning_rate_change/(tf.sqrt(v_hat) + adam_e)\n update_w.append(tf.assign(self.w,tf.subtract(self.w,tf.multiply(adam_middel,m_hat) ))) \n\n return grad_pass,update_w \n\nclass CNN_Trans():\n \n def __init__(self,k,inc,out):\n self.w = tf.Variable(tf.truncated_normal([k,k,inc,out],stddev=0.05))\n # self.w = tf.Variable(xavier_init_cnn(k,inc,out))\n self.m,self.v_prev = tf.Variable(tf.zeros_like(self.w)),tf.Variable(tf.zeros_like(self.w))\n\n def getw(self): return self.w\n\n def feedforward(self,input,stride=1,padding='SAME'):\n self.input = input\n output_shape2 = self.input.shape[2].value * stride\n self.layer = tf.nn.conv2d_transpose(\n input,self.w,output_shape=[batch_size,output_shape2,output_shape2,self.w.shape[2].value],\n strides=[1,stride,stride,1],padding=padding) \n self.layerA = tf_elu(self.layer)\n return self.layerA \n\nclass FNN():\n def __init__(self,input_dim,hidden_dim):\n self.w = tf.Variable(tf.truncated_normal([input_dim,hidden_dim],stddev=0.05))\n # self.w = tf.Variable(xavier_init(input_dim,hidden_dim))\n\n def feedforward(self,input=None):\n self.input = input\n self.layer = tf.matmul(input,self.w)\n self.layerA = tf_elu(self.layer)\n return self.layerA\n\n# ================= LAYER CLASSES =================\n\n# data\nmnist = input_data.read_data_sets('../../Dataset/MNIST/', one_hot=True)\n\nx_data, train_label, y_data, test_label = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels\nx_data = 
x_data.reshape(-1, 28, 28, 1) # 28x28x1 input img\ny_data = y_data.reshape(-1, 28, 28, 1) # 28x28x1 input img\n\ntrain_batch = np.zeros((55000,28,28,1))\ntest_batch = np.zeros((10000,28,28,1))\nfor x in range(len(x_data)):\n train_batch[x,:,:,:] = np.expand_dims(imresize(x_data[x,:,:,0],(28,28)),axis=3)\nfor x in range(len(y_data)):\n test_batch[x,:,:,:] = np.expand_dims(imresize(y_data[x,:,:,0],(28,28)),axis=3)\n\n# print out the data shape\nprint(train_batch.shape)\nprint(train_label.shape)\nprint(test_batch.shape)\nprint(test_label.shape)\n\n# hyper parameter\nnum_epoch = 101\nbatch_size = 100\nprint_size = 10\n\nlearning_rate = 0.0008\nlearnind_rate_decay = 0.0\nbeta1,beta2,adam_e = 0.9,0.999,1e-8\n\n# define class here\nl1 = CNN(3,1,16)\nl2 = CNN(3,16,32)\nl3_prep = FNN(7*7*32,100)\n\nmean_vector,std_vector = FNN(100,3),FNN(100,3)\n\nl4_prep = FNN(3,100)\nl4 = FNN(100,7*7*32)\nl5 = CNN_Trans(3,16,32)\nl52 = CNN(3,16,16)\nl6 = CNN_Trans(3,1,16)\nl62 = CNN(3,1,1)\n\n# graph\nx = tf.placeholder(shape=[batch_size,28,28,1],dtype=tf.float32)\n\nlayer1 = l1.feedforward(x)\nlayer2_Input = tf.nn.avg_pool(layer1,ksize=[1,2,2,1],strides=[1,2,2,1],padding='VALID')\nlayer2 = l2.feedforward(layer2_Input) \nlayer3_Input = tf.nn.avg_pool(layer2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='VALID')\n\nlayer3_prep_Input = tf.reshape(layer3_Input,[batch_size,-1])\nlayer3_prep = l3_prep.feedforward(layer3_prep_Input)\n\nmean = mean_vector.feedforward(layer3_prep)\nstd = std_vector.feedforward(layer3_prep)\nsamples = tf.random_normal([batch_size,3], 0, 1, dtype=tf.float32)\nz = mean + tf.sqrt(tf.exp(std)) * samples\n\nlayer4_prep = l4_prep.feedforward(z)\nlayer4 = l4.feedforward(layer4_prep)\n\nlayer5_Input = tf.reshape(layer4,[batch_size,7,7,32])\nlayer5 = l5.feedforward(layer5_Input,stride=2)\nlayer52 = l52.feedforward(layer5)\nlayer6 = l6.feedforward(layer52,stride=2)\nlayer62 = l62.feedforward(layer6)\n\n# x_vector = tf.reshape(x,[batch_size,-1])\n# layer72_vector = tf.reshape(layer72,[batch_size,-1])\n# reconstr_loss = -tf.reduce_sum( x_vector * tf.log(1e-10 +layer72_vector) + (1-x_vector) * tf.log(1e-10 + 1 - layer72_vector),axis=1 )\nreconstr_loss2 = tf.reduce_mean(tf.square(layer62-x))\nlatent_loss = -0.5 * tf.reduce_sum(1 + std - tf.square(mean) - tf.exp(std), 1)\ncost = tf.reduce_mean(latent_loss+reconstr_loss2) \nauto_train = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n\n# sess\nwith tf.Session() as sess:\n\n sess.run(tf.global_variables_initializer())\n\n train_cota,train_acca = 0,0\n train_cot,train_acc = [],[]\n \n test_cota,test_acca = 0,0\n test_cot,test_acc = [],[]\n\n # start the training\n for iter in range(num_epoch):\n\n train_batch = shuffle(train_batch)\n\n for batch_size_index in range(0,len(train_batch),batch_size):\n current_batch = train_batch[batch_size_index:batch_size_index+batch_size]\n sess_result = sess.run([cost,auto_train],feed_dict={x:current_batch})\n print(\"Current Iter : \",iter ,\" current batch: \",batch_size_index, ' Current cost: ', sess_result[0],end='\\r')\n train_cota = train_cota + sess_result[0]\n\n if iter % print_size==0:\n print(\"\\n--------------\")\n print('Train Current cost: ', train_cota/(len(train_batch)/(batch_size)),end='\\n')\n print(\"----------\")\n\n if iter % 2 == 0:\n test_example = train_batch[:batch_size,:,:,:]\n test_example_gt = train_batch[:batch_size,:,:,:]\n sess_results = sess.run([layer62],feed_dict={x:test_example})\n\n sess_results = sess_results[0][0,:,:,:]\n test_example = test_example[0,:,:,:]\n 
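# (note) only the first sample of the batch is kept for the plots below\n 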
test_example_gt = test_example_gt[0,:,:,:]\n\n plt.figure()\n plt.imshow(np.squeeze(test_example),cmap='gray')\n plt.axis('off')\n plt.title('Original Image')\n plt.savefig('train_change/'+str(iter)+\"a_Original_Image.png\")\n\n sess_results[:,:,0] = (sess_results[:,:,0]-sess_results[:,:,0].min())/(sess_results[:,:,0].max()-sess_results[:,:,0].min())\n # sess_results[:,:,1] = (sess_results[:,:,1]-sess_results[:,:,1].min())/(sess_results[:,:,1].max()-sess_results[:,:,1].min())\n # sess_results[:,:,2] = (sess_results[:,:,2]-sess_results[:,:,2].min())/(sess_results[:,:,2].max()-sess_results[:,:,2].min())\n\n plt.figure()\n plt.imshow(np.squeeze(sess_results).astype(np.float32),cmap='gray')\n plt.axis('off')\n plt.title('Generated Mask')\n plt.savefig('train_change/'+str(iter)+\"c_Generated_Mask.png\")\n plt.close('all')\n\n train_cot.append(train_cota/(len(train_batch)/(batch_size)))\n train_cota,train_acca = 0,0\n\n # Normalize the cost of the training\n train_cot = np.asarray(train_cot) # editor fix: train_cot is a Python list; convert before the element-wise arithmetic below\n train_cot = (train_cot-train_cot.min()) / (train_cot.max()-train_cot.min())\n\n # plot the training and testing graph\n plt.figure()\n plt.plot(range(len(train_acc)),train_acc,color='red',label='acc ovt')\n plt.plot(range(len(train_cot)),train_cot,color='green',label='cost ovt')\n plt.legend()\n plt.title(\"Train Average Accuracy / Cost Over Time\")\n plt.savefig(\"viz/Case Train.png\")\n\n\n# -- end code --","sub_path":"NeuralNetwork/Auto/b_minst_kl.py","file_name":"b_minst_kl.py","file_ext":"py","file_size_in_byte":12187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}\n{"seq_id":"408971621","text":"\"\"\"\n2. Write a program that takes a text file from the user and shows on screen how many letters\nare vowels and how many are consonants\n\"\"\"\nfrom time import sleep\ndef abrir_arquivo(f):\n vogais = 0\n consoante = 0\n cont = 0\n print(\"Please wait a moment, the system is processing...\")\n sleep(2)\n with open(f) as abrir:\n x = abrir.read()\n texto = x.replace('\\n', '').replace(' ', '').replace('/', '')\n lista = list(texto.upper())\n for c in range(len(lista)):\n if lista[c] in 'AEIOU':\n vogais += 1\n elif lista[c] in 'BCDFGHJKLMNPQRSTVWXYZ':\n consoante += 1\n else:\n cont += 1\n print(f'Vowels: {vogais}')\n print(f'Consonants: {consoante}')\n print(f\"Characters not counted: {cont}\")\n # the with-block closes the file automatically (editor note: replaced a no-op 'abrir.closed')\n\nprint(\"Note: enter the file path using the forward slash [/] \\nthis program only accepts .txt files\")\nprint(\"-\"*25)\nvogais = 0\narquivo = input(\"Enter the file: \")\nprint(\"==\"*20)\nabrir_arquivo(arquivo)\n\n","sub_path":"lista06/ex002.py","file_name":"ex002.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}\n{"seq_id":"311786222","text":"#codeupdate123tes26123estr3\nimport json\nimport boto3\nfrom PyPDF4 import PdfFileReader, PdfFileWriter\nimport os\nfrom io import BytesIO\nfrom datetime import date, time, datetime\nimport psycopg2\nimport uuid\nimport logging\nimport warnings\n\nwarnings.filterwarnings(\"ignore\")\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\ndef rds_connection():\n rds_host = 'bh-dcap.cysf64wiobfj.us-east-1.rds.amazonaws.com'\n name = \"bhadmin\"\n password = \"Baker123\"\n db_name = \"postgres\"\n try:\n conn_string = \"host=%s user=%s password=%s dbname=%s\" % (rds_host, name, password, db_name)\n connection = psycopg2.connect(conn_string)\n logger.info(\"RDS server information :==> \" + str(connection.get_dsn_parameters()))\n except 
(Exception, psycopg2.Error) as error:\n        logger.error(\"Error while connecting to PostgreSQL: \" + str(error))\n        connection.close()\n        logger.info(\"RDS connection is closed\")\n\n    return connection\n\n\ndef lambda_handler(event, context):\n    # bucket = event['bucket_name']\n    # key = event['file_name']\n    bucket = event['Records'][0]['s3']['bucket']['name']\n    key = event['Records'][0]['s3']['object']['key']\n\n    # RDS Connection\n    conn = rds_connection()\n    cursor = conn.cursor()\n    table_name = 'vlm_spilt_pdf'\n    cursor.execute(\"select * from information_schema.tables where table_name=%s\", ('vlm_spilt_pdf',))\n    if cursor.rowcount == 0:\n        create_table_query = \"\"\"CREATE TABLE {} (ID VARCHAR(64) PRIMARY KEY,\n                            FILENAME TEXT NOT NULL, PAGECOUNT INT,\n                            DATE timestamp NOT NULL, SPLIT_FILES_PATH TEXT,\n                            ARCHIVE_FILES_PATH TEXT, SPLIT_STATUS TEXT NOT NULL\n                            );\"\"\".format(table_name)\n        cursor.execute(create_table_query)\n        conn.commit()\n        logger.info(\"Table created successfully in PostgreSQL \")\n    else:\n        logger.info(\"Table exists already \")\n\n    query = \"INSERT INTO vlm_spilt_pdf (ID, FILENAME, PAGECOUNT, DATE, SPLIT_FILES_PATH, ARCHIVE_FILES_PATH, SPLIT_STATUS) VALUES (%s, %s, %s, %s, %s, %s, %s)\"\n    try:\n        bucket = event['Records'][0]['s3']['bucket']['name']\n        key = event['Records'][0]['s3']['object']['key']\n\n        # bucket = event['bucket_name']\n        # key = event['file_name']\n\n        dirpath = os.path.dirname(key)\n        logger.info(\"Connecting to \" + bucket + \" S3 bucket\")\n        s3 = boto3.resource('s3')\n        client = boto3.client('s3')\n        my_bucket = s3.Bucket(bucket)\n\n        logger.info(\"Date time folder path\")\n        filedt = datetime.utcnow()\n        fileDate = filedt.strftime('%Y-%m-%d-%H-%M-%S')\n\n        logger.info(\"Copying & deleting of VLM pdf files from landing zone to archive vlm folder\")\n        for my_bucket_object in my_bucket.objects.filter(Prefix=dirpath):\n            filepath = str(my_bucket_object.key)\n            filename = filepath.split(\"/\")[-1]\n            print(filename)\n\n            if filepath.endswith(\".pdf\"):\n                targetkey = \"landing-zone/archive-vlm/\" + fileDate + \"/\" + filename\n                response1 = client.copy_object(Bucket=bucket, Key=targetkey,\n                                               CopySource={'Bucket': bucket, 'Key': filepath})\n                logger.info(\"Copying of VLM pdfs is done. Filename is \" + filename)\n\n                logger.info(\"Splitting pdf file has been started.. Filename is: \" + filename)\n                result = client.get_object(Bucket=bucket, Key=filepath)\n                body = result['Body'].read()\n                f = PdfFileReader(BytesIO(body))\n                for i in range(f.numPages):\n                    f = PdfFileReader(BytesIO(body))\n                    output = PdfFileWriter()\n                    output.addPage(f.getPage(i))\n                    page_num = str(i + 1)\n                    tmp_path = str(\"/tmp/\" + filename + \"_page_\" + page_num + \".pdf\")\n                    with open(tmp_path, \"wb\") as outputStream:\n                        output.write(outputStream)\n                    split_key_object = str(\n                        \"processing-zone/split-pdfs/\" + fileDate + \"/\" + filename[:-4] + \"_page_\" + page_num + \".pdf\")\n                    my_bucket.upload_file(tmp_path, split_key_object)\n\n                    if os.path.exists(tmp_path):\n                        os.remove(tmp_path)\n                        logger.info(\"Removed the file %s\" % tmp_path)\n                    else:\n                        logger.info(\"Sorry, file %s does not exist.\" % tmp_path)\n\n                logger.info(\"Splitting pdf file has been successfully finished. Filename is: \" + filename)\n                response2 = client.delete_object(Bucket=bucket, Key=filepath)\n                logger.info(\"Delete of PO pdfs from trigger zone is done. 
Filename is\" + filename)\n tbl = (str(uuid.uuid4().time), filename, f.numPages, datetime.utcnow(),\n str(\"processing-zone/split-pdfs/\" + fileDate), str(\"landing-zone/archive-vlm/\" + fileDate),\n \"Success\")\n cursor.execute(query, tbl)\n conn.commit()\n\n elif filepath.endswith(\".json\"):\n logger.info(\"No Json file is found. filename is\" + filename)\n\n else:\n logger.info(\"No file found in trigger-folder\")\n\n return {\n 'statusCode': 200,\n 'body': json.dumps('Successful..!!')\n }\n\n except Exception as e:\n logger.info(\"Splitting process failed \" + str(repr(e)))\n tbl = (str(uuid.uuid4().time), filename, '', datetime.utcnow(), '', '', \"Failure\")\n cursor.execute(query, tbl)\n conn.commit()\n\n return {\n 'statusCode': 500,\n 'body': json.dumps('Splitting failed ..!!')\n }\n\n finally:\n if (conn):\n cursor.close()\n conn.close()\n logger.info(\"RDS connection is closed\")\n","sub_path":"vlm-splitter.py","file_name":"vlm-splitter.py","file_ext":"py","file_size_in_byte":5973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"341862866","text":"\"\"\"\r\nHyperFlex Edge Auto Deploy Script for dCloud (All Flash/Hybrid, 2-Node), v1.0\r\nAuthor: Ugo Emekauwa\r\nContact: uemekauw@cisco.com, uemekauwa@gmail.com\r\nSummary: The HyperFlex Edge Auto Deploy Script enables automated deployment\r\n of Cisco HyperFlex Edge clusters through the Cisco Intersight API.\r\n\"\"\"\r\n\r\n########################\r\n# MODULE REQUIREMENT 1 #\r\n########################\r\n\"\"\"\r\nFor the following variable below named key_id, please fill in between\r\nthe quotes your Intersight API Key ID.\r\n\r\nHere is an example: key_id = \"5c89885075646127773ec143/5c82fc477577712d3088eb2f/5c8987b17577712d302eaaff\"\r\n\"\"\"\r\nkey_id = \"\"\r\n\r\n\r\n# MODULE REQUIREMENT 2\r\n\"\"\"\r\nFor the following variable below named key, please fill in between\r\nthe quotes your system's file path to your Intersight API key \"SecretKey.txt\" file.\r\n\r\nHere is an example: key = \"J:\\\\SecretKey.txt\"\r\n\"\"\"\r\nkey = \"J:\\\\SecretKey.txt\"\r\n\r\n\r\n# Import needed Python modules\r\nimport sys\r\nimport os\r\nimport datetime\r\nimport intersight\r\nfrom intersight.intersight_api_client import IntersightApiClient\r\n\r\n# Define time variable\r\nget_date = datetime.datetime.now()\r\ndate = get_date.strftime(\"%m/%d/%Y %H:%M:%S\")\r\n\r\n# Starting the HyperFlex Cluster Profile Deployment Script\r\nprint(\"Starting HyperFlex Cluster Profile Deployment Script.\")\r\n\r\n# Define Intersight SDK IntersightApiClient variables\r\n# Tested on Cisco Intersight API Reference v1.0.9-1229\r\nbase_url = \"https://intersight.com/api/v1\"\r\napi_instance = IntersightApiClient(host=base_url,private_key=key,api_key_id=key_id)\r\n\r\n# Establish function to test for the availability of the Intersight API and Intersight account\r\n\r\ndef test_intersight_service():\r\n \"\"\"This is a function to test the availability of the Intersight API and Intersight account. 
The Intersight account\r\n    tested for is the owner of the provided Intersight API key and key ID.\r\n    \"\"\"\r\n    try:\r\n        # Check that Intersight Account is accessible\r\n        print(\"Testing access to the Intersight API by verifying the Intersight account information...\")\r\n        check_account = intersight.IamAccountApi(api_instance)\r\n        get_account = check_account.iam_accounts_get()\r\n        if check_account.api_client.last_response.status != 200:\r\n            print(\"The Intersight API and Account Availability Test did not pass.\")\r\n            print(\"The Intersight account information could not be verified.\")\r\n            print(\"Exiting due to the Intersight account being unavailable.\\n\")\r\n            print(\"Please verify that the correct API Key ID has been entered in the script.\\n\")\r\n            sys.exit(0)\r\n        else:\r\n            account_name = get_account.results[0].name\r\n            print(\"The Intersight API and Account Availability Test has passed.\\n\")\r\n            print(\"The Intersight account named '\" + account_name + \"' has been found.\\n\")\r\n            return account_name\r\n    except Exception:\r\n        print(\"Unable to access the Intersight API.\")\r\n        print(\"Exiting due to the Intersight API being unavailable.\\n\")\r\n        print(\"Please verify that the correct API Key ID has been entered in the script.\\n\")\r\n        sys.exit(0)\r\n\r\n\r\n# Run the Intersight API and Account Availability Test\r\nprint(\"Running the Intersight API and Account Availability Test.\")\r\nintersight_api_test = test_intersight_service()\r\nintersight_account_name = intersight_api_test\r\n\r\n# Define required variables for HyperFlex Cluster Profile\r\nhx_cluster_profile_name = \"dcloud-hx-edge-cluster-1\"\r\nhx_software_version = \"4.0(1b)\"\r\nhx_mgmt_platform_type = \"EDGE\"\r\nhx_vlan_id = 100\r\nhx_node1_attribute = \"198.18.135.116\"\r\nhx_node2_attribute = \"198.18.135.117\"\r\nhx_mgmt_ip_address = \"198.18.135.100\"\r\nhx_mac_address_prefix = \"00:25:B5:00\"\r\n\r\n# Pre-defined HyperFlex Policies\r\nhx_local_credential_policy_name = \"sample-local-credential-policy\"\r\nhx_sys_config_policy_name = \"sample-sys-config-policy\"\r\nhx_vcenter_config_policy_name = \"sample-vcenter-config-policy\"\r\nhx_cluster_storage_policy_name = \"sample-cluster-storage-policy\"\r\nhx_node_config_policy_name = \"sample-node-config-policy\"\r\nhx_cluster_network_policy_name = \"sample-cluster-network-policy\"\r\n\r\n# Create the HyperFlex Software Version Policy\r\nprint(\"Attempting to create a new HyperFlex Software Version Policy for \nHyperFlex \" + hx_software_version + \"...\")\r\nhx_software_version_policy_name = hx_cluster_profile_name + \"-software-version-policy\"\r\nprint(\"Checking for the presence of pre-existing HyperFlex Software Version Policies...\")\r\nhx_software_version_policy = intersight.HyperflexSoftwareVersionPolicyApi(api_instance)\r\nget_hx_software_version_policy = hx_software_version_policy.hyperflex_software_version_policies_get()\r\nget_hx_software_version_policy_dict = get_hx_software_version_policy.to_dict()\r\n\r\nif get_hx_software_version_policy_dict[\"results\"] is not None:\r\n    for policy in get_hx_software_version_policy_dict[\"results\"]:\r\n        if policy[\"name\"] == hx_software_version_policy_name:\r\n            print(\"A HyperFlex Software Version Policy named \" + hx_software_version_policy_name + \" already exists.\")\r\n            found_hx_software_version_policy = policy\r\n            if found_hx_software_version_policy[\"hxdp_version\"] == hx_software_version:\r\n                print(\"The existing HyperFlex Software Version Policy is configured with the \nrequested software version, 
HyperFlex \" + hx_software_version + \".\\n\")\r\n else:\r\n print(\"The required HyperFlex Software Version Policy cannot be created due to a pre-exisiting policy that does not meet the requested HyperFlex software version.\")\r\n print(\"Please check the Intersight Account named \" + intersight_account_name + \" through the GUI.\")\r\n print(\"Verify that no pre-existing HyperFlex clusters with the name \" + hx_cluster_profile_name + \" are present.\")\r\n print(\"If further help is needed, please contact dCloud support.\")\r\n print(\"Exiting the HyperFlex Cluster Profile Deployment Script.\\n\")\r\n sys.exit(0)\r\n break\r\n else:\r\n print(\"Creating a new HyperFlex Software Version Policy named \" + hx_software_version_policy_name + \".\\n\")\r\n hx_software_version_policy1_body = {\r\n \"Name\": hx_software_version_policy_name,\r\n \"HxdpVersion\": hx_software_version,\r\n }\r\n post_hx_software_version_policy1 = hx_software_version_policy.hyperflex_software_version_policies_post(hx_software_version_policy1_body)\r\nelse:\r\n print(\"Creating a new HyperFlex Software Version Policy named \" + hx_software_version_policy_name + \".\\n\")\r\n hx_software_version_policy1_body = {\r\n \"Name\": hx_software_version_policy_name,\r\n \"HxdpVersion\": hx_software_version,\r\n }\r\n post_hx_software_version_policy1 = hx_software_version_policy.hyperflex_software_version_policies_post(hx_software_version_policy1_body)\r\n\r\n# Retrieve the HyperFlex Software Version Policy\r\nget_hx_software_version_policy = hx_software_version_policy.hyperflex_software_version_policies_get()\r\nget_hx_software_version_policy_dict = get_hx_software_version_policy.to_dict()\r\n\r\nif get_hx_software_version_policy_dict[\"results\"] is not None:\r\n for policy in get_hx_software_version_policy_dict[\"results\"]:\r\n if policy[\"name\"] == hx_software_version_policy_name:\r\n hx_software_version_policy_moid = policy[\"moid\"]\r\n print(\"The required HyperFlex Software Version Policy named \" + hx_software_version_policy_name + \" with the MOID of \" + hx_software_version_policy_moid + \" has been identified.\\n\")\r\nelse:\r\n print(\"The required HyperFlex Software Version Policy named \" + hx_software_version_policy_name + \" was not found.\")\r\n print(\"Please check the Intersight Account named \" + intersight_account_name + \" through the GUI and verify that the needed resources are present.\")\r\n print(\"If further help is needed, please contact dCloud support.\")\r\n print(\"Exiting the HyperFlex Cluster Profile Deployment Script.\\n\")\r\n sys.exit(0)\r\n\r\n# Retrieve available rack servers\r\nprint(\"Retrieving available rack unit servers...\\n\")\r\ncmp_rack_unit = intersight.ComputeRackUnitApi(api_instance)\r\nget_cmp_rack_unit = cmp_rack_unit.compute_rack_units_get()\r\nget_cmp_rack_unit_dict = get_cmp_rack_unit.to_dict()\r\ncmp_rack_unit_list = []\r\n\r\nif get_cmp_rack_unit_dict[\"results\"] is not None:\r\n for rack_unit in get_cmp_rack_unit_dict[\"results\"]:\r\n if any(attribute in (hx_node1_attribute, hx_node2_attribute) for attribute in (rack_unit[\"kvm_ip_addresses\"][0][\"address\"], rack_unit[\"serial\"])):\r\n rack_unit_dict = {\"MOID\":rack_unit[\"moid\"], \"IP Address\":rack_unit[\"kvm_ip_addresses\"][0][\"address\"], \"Serial\":rack_unit[\"serial\"]}\r\n cmp_rack_unit_list.append(rack_unit_dict)\r\nelse:\r\n print(\"There were no available rack servers found.\")\r\n print(\"Please check the Intersight Account named \" + intersight_account_name + \" through the GUI \\nand verify that 
the required rack server is present.\")\r\n print(\"If further help is needed, please contact dCloud support.\")\r\n print(\"Exiting the HyperFlex Cluster Profile Deployment Script.\\n\")\r\n sys.exit(0)\r\n\r\n# Find pre-defined HX server nodes among retrieved rack servers\r\ncmp_rack_unit_attribute_list = []\r\nfor selected_rack_unit in cmp_rack_unit_list:\r\n cmp_rack_unit_attribute_list.append(selected_rack_unit[\"IP Address\"])\r\n cmp_rack_unit_attribute_list.append(selected_rack_unit[\"Serial\"])\r\n\r\nif not cmp_rack_unit_list:\r\n print(\"No pre-defined HyperFlex server nodes were found.\")\r\n print(\"Please check the Intersight Account named \" + intersight_account_name + \" through the GUI \\nand verify that the required rack servers are present.\")\r\n print(\"If further help is needed, please contact dCloud support.\")\r\n print(\"Exiting the HyperFlex Cluster Profile Deployment Script.\\n\")\r\n sys.exit(0)\r\nelse:\r\n for hx_node in cmp_rack_unit_list:\r\n if hx_node1_attribute in (hx_node[\"IP Address\"], hx_node[\"Serial\"]):\r\n hx_node1_moid = hx_node[\"MOID\"]\r\n print(\"The HyperFlex server node with the pre-defined attribute \" + hx_node1_attribute + \" has been found.\")\r\n print(\"The server has an IP address of \" + hx_node[\"IP Address\"] + \", the serial number \" + hx_node[\"Serial\"] + \", and the MOID of \" + hx_node[\"MOID\"] + \".\\n\")\r\n if hx_node2_attribute in (hx_node[\"IP Address\"], hx_node[\"Serial\"]):\r\n hx_node2_moid = hx_node[\"MOID\"]\r\n print(\"The HyperFlex server node with the pre-defined attribute \" + hx_node2_attribute + \" has been found.\")\r\n print(\"The server has an IP address of \" + hx_node[\"IP Address\"] + \", the serial number \" + hx_node[\"Serial\"] + \", and the MOID of \" + hx_node[\"MOID\"] + \".\\n\")\r\n\r\nfor hx_node_attribute in (hx_node1_attribute, hx_node2_attribute):\r\n if hx_node_attribute not in cmp_rack_unit_attribute_list:\r\n print(\"The required rack server with the assigned attribute \" + hx_node_attribute + \" \\nwas not found.\")\r\n print(\"Please check the Intersight Account named \" + intersight_account_name + \" through the GUI \\nand verify that the required rack servers are present.\")\r\n print(\"If further help is needed, please contact dCloud support.\")\r\n print(\"Exiting the HyperFlex Cluster Profile Deployment Script.\\n\")\r\n sys.exit(0)\r\n \r\n# Retrieve the HyperFlex Local Credential Policy for the Cluster Configuration \"Security\" policy type settings\r\nhx_local_credential_policy = intersight.HyperflexLocalCredentialPolicyApi(api_instance)\r\nget_hx_local_credential_policy = hx_local_credential_policy.hyperflex_local_credential_policies_get()\r\nget_hx_local_credential_policy_dict = get_hx_local_credential_policy.to_dict()\r\n\r\nif get_hx_local_credential_policy_dict[\"results\"] is not None:\r\n for policy in get_hx_local_credential_policy_dict[\"results\"]:\r\n if policy[\"name\"] == hx_local_credential_policy_name:\r\n hx_local_credential_policy_moid = policy[\"moid\"]\r\n print(\"The required HyperFlex Local Credential Policy named \" + hx_local_credential_policy_name + \" with the MOID of \" + hx_local_credential_policy_moid + \" has been identified.\\n\")\r\nelse:\r\n print(\"The required HyperFlex Local Credential Policy named \" + hx_local_credential_policy_name + \" was not found.\")\r\n print(\"Please check the Intersight Account named \" + intersight_account_name + \" through the GUI and verify that the needed resources are present.\")\r\n print(\"If further help is 
needed, please contact dCloud support.\")\r\n print(\"Exiting the HyperFlex Cluster Profile Deployment Script.\\n\")\r\n sys.exit(0)\r\n\r\n# Retrieve the HyperFlex System Configuration Policy for the Cluster Configuration \"DNS, NTP, and Timezone\" policy type settings\r\nhx_sys_config_policy = intersight.HyperflexSysConfigPolicyApi(api_instance)\r\nget_hx_sys_config_policy = hx_sys_config_policy.hyperflex_sys_config_policies_get()\r\nget_hx_sys_config_policy_dict = get_hx_sys_config_policy.to_dict()\r\n\r\nif get_hx_sys_config_policy_dict[\"results\"] is not None:\r\n for policy in get_hx_sys_config_policy_dict[\"results\"]:\r\n if policy[\"name\"] == hx_sys_config_policy_name:\r\n hx_sys_config_policy_moid = policy[\"moid\"]\r\n print(\"The required HyperFlex System Configuration Policy named \" + hx_sys_config_policy_name + \" with the MOID of \" + hx_sys_config_policy_moid + \" has been identified.\\n\")\r\nelse:\r\n print(\"The required HyperFlex System Configuration Policy named \" + hx_sys_config_policy_name + \" was not found.\")\r\n print(\"Please check the Intersight Account named \" + intersight_account_name + \" through the GUI and verify that the needed resources are present.\")\r\n print(\"If further help is needed, please contact dCloud support.\")\r\n print(\"Exiting the HyperFlex Cluster Profile Deployment Script.\\n\")\r\n sys.exit(0)\r\n\r\n# Retrieve the HyperFlex VMware vCenter Configuration Policy for the Cluster Configuration \"vCenter (Optional)\" policy type settings\r\nhx_vcenter_config_policy = intersight.HyperflexVcenterConfigPolicyApi(api_instance)\r\nget_hx_vcenter_config_policy = hx_vcenter_config_policy.hyperflex_vcenter_config_policies_get()\r\nget_hx_vcenter_config_policy_dict = get_hx_vcenter_config_policy.to_dict()\r\n\r\nif get_hx_vcenter_config_policy_dict[\"results\"] is not None:\r\n for policy in get_hx_vcenter_config_policy_dict[\"results\"]:\r\n if policy[\"name\"] == hx_vcenter_config_policy_name:\r\n hx_vcenter_config_policy_moid = policy[\"moid\"]\r\n print(\"The required HyperFlex VMware vCenter Configuration Policy named \" + hx_vcenter_config_policy_name + \" with the MOID of \" + hx_vcenter_config_policy_moid + \" has been identified.\\n\")\r\nelse:\r\n print(\"The required HyperFlex VMware vCenter Configuration Policy named \" + hx_vcenter_config_policy_name + \" was not found.\")\r\n print(\"Please check the Intersight Account named \" + intersight_account_name + \" through the GUI and verify that the needed resources are present.\")\r\n print(\"If further help is needed, please contact dCloud support.\")\r\n print(\"Exiting the HyperFlex Cluster Profile Deployment Script.\\n\")\r\n sys.exit(0)\r\n\r\n# Retrieve the HyperFlex Cluster Storage Configuration Policy for the Cluster Configuration \"Storage Configuration (Optional)\" policy type settings\r\nhx_cluster_storage_policy = intersight.HyperflexClusterStoragePolicyApi(api_instance)\r\nget_hx_cluster_storage_policy = hx_cluster_storage_policy.hyperflex_cluster_storage_policies_get()\r\nget_hx_cluster_storage_policy_dict = get_hx_cluster_storage_policy.to_dict()\r\n\r\nif get_hx_cluster_storage_policy_dict[\"results\"] is not None:\r\n for policy in get_hx_cluster_storage_policy_dict[\"results\"]:\r\n if policy[\"name\"] == hx_cluster_storage_policy_name:\r\n hx_cluster_storage_policy_moid = policy[\"moid\"]\r\n print(\"The required HyperFlex Cluster Storage Configuration Policy named \" + hx_cluster_storage_policy_name + \" with the MOID of \" + hx_cluster_storage_policy_moid + \" has 
been identified.\\n\")\r\nelse:\r\n print(\"The required HyperFlex Cluster Storage Configuration Policy named \" + hx_cluster_storage_policy_name + \" was not found.\")\r\n print(\"Please check the Intersight Account named \" + intersight_account_name + \" through the GUI and verify that the needed resources are present.\")\r\n print(\"If further help is needed, please contact dCloud support.\")\r\n print(\"Exiting the HyperFlex Cluster Profile Deployment Script.\\n\")\r\n sys.exit(0)\r\n\r\n# Retrieve the HyperFlex Node Configuration Policy for the Cluster Configuration \"IP & Hostname\" policy type settings\r\nhx_node_config_policy = intersight.HyperflexNodeConfigPolicyApi(api_instance)\r\nget_hx_node_config_policy = hx_node_config_policy.hyperflex_node_config_policies_get()\r\nget_hx_node_config_policy_dict = get_hx_node_config_policy.to_dict()\r\n\r\nif get_hx_node_config_policy_dict[\"results\"] is not None:\r\n for policy in get_hx_node_config_policy_dict[\"results\"]:\r\n if policy[\"name\"] == hx_node_config_policy_name:\r\n hx_node_config_policy_moid = policy[\"moid\"]\r\n print(\"The required HyperFlex Node Configuration Policy named \" + hx_node_config_policy_name + \" with the MOID of \" + hx_node_config_policy_moid + \" has been identified.\\n\")\r\nelse:\r\n print(\"The required HyperFlex Node Configuration Policy named \" + hx_node_config_policy_name + \" was not found.\")\r\n print(\"Please check the Intersight Account named \" + intersight_account_name + \" through the GUI and verify that the needed resources are present.\")\r\n print(\"If further help is needed, please contact dCloud support.\")\r\n print(\"Exiting the HyperFlex Cluster Profile Deployment Script.\\n\")\r\n sys.exit(0)\r\n\r\n# Retrieve the HyperFlex Cluster Network Configuration Policy for the Cluster Configuration \"Network Configuration\" policy type settings\r\nhx_cluster_network_policy = intersight.HyperflexClusterNetworkPolicyApi(api_instance)\r\nget_hx_cluster_network_policy = hx_cluster_network_policy.hyperflex_cluster_network_policies_get()\r\nget_hx_cluster_network_policy_dict = get_hx_cluster_network_policy.to_dict()\r\n\r\nif get_hx_cluster_network_policy_dict[\"results\"] is not None:\r\n for policy in get_hx_cluster_network_policy_dict[\"results\"]:\r\n if policy[\"name\"] == hx_cluster_network_policy_name:\r\n hx_cluster_network_policy_moid = policy[\"moid\"]\r\n print(\"The required HyperFlex Cluster Network Configuration Policy named \" + hx_cluster_network_policy_name + \" with the MOID of \" + hx_cluster_network_policy_moid + \" has been identified.\\n\")\r\nelse:\r\n print(\"The required HyperFlex Cluster Network Configuration Policy named \" + hx_cluster_network_policy_name + \" was not found.\")\r\n print(\"Please check the Intersight Account named \" + intersight_account_name + \" through the GUI and verify that the needed resources are present.\")\r\n print(\"If further help is needed, please contact dCloud support.\")\r\n print(\"Exiting the HyperFlex Cluster Profile Deployment Script.\\n\")\r\n sys.exit(0)\r\n\r\n# Create the HyperFlex Cluster Profile\r\nprint(\"Attempting to create a new HyperFlex Cluster Profile...\")\r\nprint(\"Checking for the presence of pre-existing HyperFlex Cluster Profiles...\")\r\nhx_cluster_profile = intersight.HyperflexClusterProfileApi(api_instance)\r\nget_hx_cluster_profile = hx_cluster_profile.hyperflex_cluster_profiles_get()\r\nget_hx_cluster_profile_dict = get_hx_cluster_profile.to_dict()\r\n\r\nif get_hx_cluster_profile_dict[\"results\"] is not 
None:\r\n    for profile in get_hx_cluster_profile_dict[\"results\"]:\r\n        if profile[\"name\"] == hx_cluster_profile_name:\r\n            print(\"A HyperFlex Cluster Profile named \" + hx_cluster_profile_name + \" already exists.\")\r\n            print(\"The new HyperFlex Cluster Profile cannot be created due to the pre-existing profile.\")\r\n            print(\"Please check the Intersight Account named \" + intersight_account_name + \" through the GUI.\")\r\n            print(\"Verify that no pre-existing HyperFlex clusters with the name \" + hx_cluster_profile_name + \" are present.\")\r\n            print(\"If further help is needed, please contact dCloud support.\")\r\n            print(\"Exiting the HyperFlex Cluster Profile Deployment Script.\\n\")\r\n            sys.exit(0)\r\n            break\r\n    else:\r\n        print(\"Creating a new HyperFlex Cluster Profile named \" + hx_cluster_profile_name + \".\\n\")\r\n        hx_cluster_profile1_body = {\r\n            \"Name\": hx_cluster_profile_name,\r\n            \"MgmtPlatform\": hx_mgmt_platform_type,\r\n            \"SoftwareVersion\": {\"Moid\":hx_software_version_policy_moid},\r\n            \"LocalCredential\": {\"Moid\":hx_local_credential_policy_moid },\r\n            \"SysConfig\": {\"Moid\": hx_sys_config_policy_moid},\r\n            \"VcenterConfig\": {\"Moid\": hx_vcenter_config_policy_moid},\r\n            \"ClusterStorage\": {\"Moid\": hx_cluster_storage_policy_moid},\r\n            \"NodeConfig\": {\"Moid\": hx_node_config_policy_moid},\r\n            \"ClusterNetwork\": {\"Moid\": hx_cluster_network_policy_moid}, \r\n            \"StorageDataVlan\":{\"VlanId\": hx_vlan_id},\r\n            \"MgmtIpAddress\": hx_mgmt_ip_address,\r\n            \"MacAddressPrefix\": hx_mac_address_prefix,\r\n            }\r\n        post_hx_cluster_profile1 = hx_cluster_profile.hyperflex_cluster_profiles_post(hx_cluster_profile1_body)\r\nelse:\r\n    print(\"Creating a new HyperFlex Cluster Profile named \" + hx_cluster_profile_name + \".\\n\")\r\n    hx_cluster_profile1_body = {\r\n        \"Name\": hx_cluster_profile_name,\r\n        \"MgmtPlatform\": hx_mgmt_platform_type,\r\n        \"SoftwareVersion\": {\"Moid\":hx_software_version_policy_moid},\r\n        \"LocalCredential\": {\"Moid\":hx_local_credential_policy_moid },\r\n        \"SysConfig\": {\"Moid\": hx_sys_config_policy_moid},\r\n        \"VcenterConfig\": {\"Moid\": hx_vcenter_config_policy_moid},\r\n        \"ClusterStorage\": {\"Moid\": hx_cluster_storage_policy_moid},\r\n        \"NodeConfig\": {\"Moid\": hx_node_config_policy_moid},\r\n        \"ClusterNetwork\": {\"Moid\": hx_cluster_network_policy_moid}, \r\n        \"StorageDataVlan\":{\"VlanId\": hx_vlan_id},\r\n        \"MgmtIpAddress\": hx_mgmt_ip_address,\r\n        \"MacAddressPrefix\": hx_mac_address_prefix,\r\n        }\r\n    post_hx_cluster_profile1 = hx_cluster_profile.hyperflex_cluster_profiles_post(hx_cluster_profile1_body)\r\n\r\n# Retrieve the HyperFlex Cluster Profile\r\nget_hx_cluster_profile = hx_cluster_profile.hyperflex_cluster_profiles_get()\r\nget_hx_cluster_profile_dict = get_hx_cluster_profile.to_dict()\r\n\r\nif get_hx_cluster_profile_dict[\"results\"] is not None:\r\n    for profile in get_hx_cluster_profile_dict[\"results\"]:\r\n        if profile[\"name\"] == hx_cluster_profile_name:\r\n            hx_cluster_profile_moid = profile[\"moid\"]\r\n            print(\"The required HyperFlex Cluster Profile named \" + hx_cluster_profile_name + \" with the MOID of \" + hx_cluster_profile_moid + \" has been identified.\\n\")\r\nelse:\r\n    print(\"The required HyperFlex Cluster Profile named \" + hx_cluster_profile_name + \" was not found.\")\r\n    print(\"Please check the Intersight Account named \" + intersight_account_name + \" through the GUI and verify that the needed resources are present.\")\r\n    print(\"If further help is needed, please contact dCloud support.\")\r\n    
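# note added during editing: abort here, since the node profiles created below require the cluster profile MOID\r\n    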
print(\"Exiting the HyperFlex Cluster Profile Deployment Script.\\n\")\r\n sys.exit(0)\r\n\r\n# Create the HyperFlex Node Profiles\r\nprint(\"Attempting to create new HyperFlex Node Profiles...\")\r\nhx_node_profile = intersight.HyperflexNodeProfileApi(api_instance)\r\n\r\nhx_node_profile1_body = {\r\n \"Name\": \"hx-edge-esxi-1\",\r\n \"HxdpMgmtIp\": \"198.18.135.103\",\r\n \"HypervisorMgmtIp\": \"198.18.135.101\",\r\n \"AssignedServer\": {\"Moid\": hx_node1_moid},\r\n \"ClusterProfile\": {\"Moid\": hx_cluster_profile_moid}\r\n }\r\n\r\nhx_node_profile2_body = {\r\n \"Name\": \"hx-edge-esxi-2\",\r\n \"HxdpMgmtIp\": \"198.18.135.104\",\r\n \"HypervisorMgmtIp\": \"198.18.135.102\",\r\n \"AssignedServer\": {\"Moid\": hx_node2_moid},\r\n \"ClusterProfile\": {\"Moid\": hx_cluster_profile_moid}\r\n }\r\n\r\npost_hx_node_profile1 = hx_node_profile.hyperflex_node_profiles_post(hx_node_profile1_body)\r\npost_hx_node_profile2 = hx_node_profile.hyperflex_node_profiles_post(hx_node_profile2_body)\r\nprint(\"New HyperFlex Node Profiles have been created.\\n\")\r\n\r\n# Retrieve the HyperFlex Node Profiles\r\nget_hx_node_profile = hx_node_profile.hyperflex_node_profiles_get()\r\nget_hx_node_profile_dict = get_hx_node_profile.to_dict()\r\n\r\nif get_hx_node_profile_dict[\"results\"] is not None:\r\n for profile in get_hx_node_profile_dict[\"results\"]:\r\n if profile[\"cluster_profile\"][\"moid\"] == hx_cluster_profile_moid:\r\n print(\"The required HyperFlex Node Profile named \" + profile[\"name\"] + \" with the MOID of \" + profile[\"moid\"] + \" has been identified.\\n\")\r\nelse:\r\n print(\"Required HyperFlex Node Profiles were not found.\")\r\n print(\"Please check the Intersight Account named \" + intersight_account_name + \" through the GUI and verify that the needed resources are present.\")\r\n print(\"If further help is needed, please contact dCloud support.\")\r\n print(\"Exiting the HyperFlex Cluster Profile Deployment Script.\\n\")\r\n sys.exit(0)\r\n\r\n# Deploy the HyperFlex Cluster Profile\r\nprint(\"Attempting to deploy the new HyperFlex Cluster Profile...\")\r\nhx_cluster_profile1_body_update1 = {\"Action\": \"Deploy\"}\r\npatch_hx_cluster_profile1 = hx_cluster_profile.hyperflex_cluster_profiles_moid_patch(hx_cluster_profile_moid,hx_cluster_profile1_body_update1)\r\nprint(\"The new HyperFlex Cluster Profile named \" + hx_cluster_profile_name + \" \\nhas been deployed!\\n\")\r\nprint(\"The HyperFlex Notification Tool on the wkst1 desktop can be used \\nto receive email alerts on the progress.\\n\")\r\nsys.exit(0)\r\n","sub_path":"HX_Auto_Deploy.py","file_name":"HX_Auto_Deploy.py","file_ext":"py","file_size_in_byte":24852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"409690044","text":"# coding=utf-8\n\nimport os\nimport sys\n\nimport datetime\nfrom math import log\nimport random\nPROJECT_ROOT = os.path.realpath(os.path.dirname(__file__))\n\nsys.path.insert(0, os.path.join(PROJECT_ROOT, os.pardir))\nsys.path.append(os.path.abspath(os.path.join(os.path.abspath(__file__), '../')))\nsys.path.append(os.path.abspath(os.path.join(os.path.abspath(__file__), '../push_util')))\nsys.path.append(os.path.abspath(os.path.join(os.path.abspath(__file__), '../../')))\nsys.path.append(os.path.abspath(os.path.join(os.path.abspath(__file__), '../../..')))\nsys.path.append(os.path.abspath(os.path.join(os.path.abspath(__file__), '../../../..')))\n\nfrom base.settings import load_django_settings\n\nload_django_settings('live_video.base', 
'live_video.app')\n\nfrom app.customer.models.community import UserMoment, UserMomentLook\n\n\nepoch = datetime.datetime(1970, 1, 1)\n\ndef epoch_seconds(date):\n \"\"\"Returns the number of seconds from the epoch to date.\"\"\"\n td = date - epoch\n return td.days * 86400 + td.seconds + (float(td.microseconds) / 1000000)\n\ndef score(ups, downs):\n return ups - downs\n\ndef hot(ups, downs, date):\n \"\"\"The hot formula. Should match the equivalent function in postgres.\"\"\"\n s = score(ups, downs)\n order = log(max(abs(s), 1), 10)\n sign = 1 if s > 0 else -1 if s < 0 else 0\n seconds = epoch_seconds(date) - 1134028003\n return round(order + sign * seconds / 450000, 7)\n\n\ndef update_rank_score():\n now = datetime.datetime.now()\n start_date = datetime.datetime(now.year, now.month, now.day) - datetime.timedelta(days=10)\n moments = UserMoment.objects.filter(create_time__gte=start_date, create_time__lte=now)\n if not moments:\n return\n\n for moment in moments:\n like_count = moment.like_count\n moment_look = UserMomentLook.objects.filter(user_moment_id=str(moment.id)).first()\n look_count = 0\n if moment_look:\n look_user_ids = moment_look.user_id_list\n look_count = len(look_user_ids)\n\n if int(moment.type) == 3:\n ups = like_count*2 + look_count + int(30*(random.random()) + 1)\n else:\n ups = like_count*2 + look_count\n rank_score = hot(ups, 0, moment.create_time)\n moment.update(set__rank_score=rank_score)\n\n\nif __name__ == '__main__':\n update_rank_score()\n\n\n# now = datetime.datetime.now()\n# result = hot(100, 0, now)\n# result2 = hot(100, 0, now)\n# result3 = hot(100, 0, now)\n# print result\n# print result2\n# print result3\n","sub_path":"background/rank_score.py","file_name":"rank_score.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"415333994","text":"\"\"\"Base class for working with log records.\n\nClass `Logs` wraps log records to analyze logs. Logs are mainly populated when\nsimulating a portfolio and can be accessed as `vectorbt.portfolio.base.Portfolio.logs`.\n\n## Stats\n\n!!! hint\n See `vectorbt.generic.stats_builder.StatsBuilderMixin.stats` and `Logs.metrics`.\n\n```python-repl\n>>> import pandas as pd\n>>> import numpy as np\n>>> from datetime import datetime, timedelta\n>>> import vectorbt as vbt\n\n>>> np.random.seed(42)\n>>> price = pd.DataFrame({\n... 'a': np.random.uniform(1, 2, size=100),\n... 'b': np.random.uniform(1, 2, size=100)\n... }, index=[datetime(2020, 1, 1) + timedelta(days=i) for i in range(100)])\n>>> size = pd.DataFrame({\n... 'a': np.random.uniform(-100, 100, size=100),\n... 'b': np.random.uniform(-100, 100, size=100),\n... 
}, index=[datetime(2020, 1, 1) + timedelta(days=i) for i in range(100)])\n>>> pf = vbt.Portfolio.from_orders(price, size, fees=0.01, freq='d', log=True)\n\n>>> pf.logs.stats(column='a')\nStart 2020-01-01 00:00:00\nEnd 2020-04-09 00:00:00\nPeriod 100 days 00:00:00\nTotal Records 100\nStatus Counts: None 0\nStatus Counts: Filled 88\nStatus Counts: Ignored 0\nStatus Counts: Rejected 12\nStatus Info Counts: None 88\nStatus Info Counts: NoCashLong 12\nName: a, dtype: object\n```\n\n`Logs.stats` also supports (re-)grouping:\n\n```python-repl\n>>> pf.logs.stats(group_by=True)\nStart 2020-01-01 00:00:00\nEnd 2020-04-09 00:00:00\nPeriod 100 days 00:00:00\nTotal Records 200\nStatus Counts: None 0\nStatus Counts: Filled 187\nStatus Counts: Ignored 0\nStatus Counts: Rejected 13\nStatus Info Counts: None 187\nStatus Info Counts: NoCashLong 13\nName: group, dtype: object\n```\"\"\"\n\nimport pandas as pd\n\nfrom vectorbt import _typing as tp\nfrom vectorbt.utils.config import merge_dicts, Config\nfrom vectorbt.utils.enum import map_enum_values\nfrom vectorbt.base.array_wrapper import ArrayWrapper\nfrom vectorbt.base.reshape_fns import to_dict\nfrom vectorbt.generic.stats_builder import StatsBuilderMixin\nfrom vectorbt.records.base import Records\nfrom vectorbt.records.decorators import add_mapped_fields\nfrom vectorbt.portfolio.enums import (\n log_dt,\n SizeType,\n Direction,\n OrderSide,\n OrderStatus,\n StatusInfo\n)\n\n__pdoc__ = {}\n\nlogs_mf_config = Config(\n dict(\n size_type=dict(defaults=dict(mapping=SizeType)),\n direction=dict(defaults=dict(mapping=Direction)),\n res_side=dict(defaults=dict(mapping=OrderSide)),\n res_status=dict(defaults=dict(mapping=OrderStatus)),\n res_status_info=dict(defaults=dict(mapping=StatusInfo))\n ),\n as_attrs=False,\n readonly=True\n)\n\"\"\"_\"\"\"\n\n__pdoc__['logs_mf_config'] = f\"\"\"Config of `vectorbt.portfolio.enums.log_dt` \nmapped fields to be overridden in `Logs`.\n\n```json\n{logs_mf_config.to_doc()}\n```\n\"\"\"\n\n\n@add_mapped_fields(log_dt, logs_mf_config)\nclass Logs(Records):\n \"\"\"Extends `Records` for working with log records.\n\n !!! 
note\n        Some features require the log records to be sorted prior to the processing.\n        Use the `vectorbt.records.base.Records.sort` method.\"\"\"\n\n    def __init__(self,\n                 wrapper: ArrayWrapper,\n                 records_arr: tp.RecordArray,\n                 idx_field: str = 'idx',\n                 **kwargs) -> None:\n        Records.__init__(\n            self,\n            wrapper,\n            records_arr,\n            idx_field=idx_field,\n            **kwargs\n        )\n\n        if not all(field in records_arr.dtype.names for field in log_dt.names):\n            raise TypeError(\"Records array must match log_dt\")\n\n    @property  # no need for cached\n    def records_readable(self) -> tp.Frame:\n        \"\"\"Records in readable format.\"\"\"\n        df = self.records.copy()\n        df.columns = pd.MultiIndex.from_tuples([\n            ('Context', 'Log Id'),\n            ('Context', 'Date'),\n            ('Context', 'Column'),\n            ('Context', 'Group'),\n            ('Context', 'Cash'),\n            ('Context', 'Position'),\n            ('Context', 'Debt'),\n            ('Context', 'Free Cash'),\n            ('Context', 'Val Price'),\n            ('Context', 'Value'),\n            ('Order', 'Size'),\n            ('Order', 'Price'),\n            ('Order', 'Size Type'),\n            ('Order', 'Direction'),\n            ('Order', 'Fees'),\n            ('Order', 'Fixed Fees'),\n            ('Order', 'Slippage'),\n            ('Order', 'Min Size'),\n            ('Order', 'Max Size'),\n            ('Order', 'Rejection Prob'),\n            ('Order', 'Lock Cash'),\n            ('Order', 'Allow Partial'),\n            ('Order', 'Raise Rejection'),\n            ('Order', 'Log'),\n            ('New Context', 'Cash'),\n            ('New Context', 'Position'),\n            ('New Context', 'Debt'),\n            ('New Context', 'Free Cash'),\n            ('New Context', 'Val Price'),\n            ('New Context', 'Value'),\n            ('Order Result', 'Size'),\n            ('Order Result', 'Price'),\n            ('Order Result', 'Fees'),\n            ('Order Result', 'Side'),\n            ('Order Result', 'Status'),\n            ('Order Result', 'Status Info'),\n            ('Order Result', 'Order Id')\n        ])\n\n        df[('Context', 'Date')] = df[('Context', 'Date')].map(lambda x: self.wrapper.index[x])\n        df[('Context', 'Column')] = df[('Context', 'Column')].map(lambda x: self.wrapper.columns[x])\n        df[('Order', 'Size Type')] = map_enum_values(df[('Order', 'Size Type')], SizeType)\n        df[('Order', 'Direction')] = map_enum_values(df[('Order', 'Direction')], Direction)\n        df[('Order Result', 'Side')] = map_enum_values(df[('Order Result', 'Side')], OrderSide)\n        df[('Order Result', 'Status')] = map_enum_values(df[('Order Result', 'Status')], OrderStatus)\n        df[('Order Result', 'Status Info')] = map_enum_values(df[('Order Result', 'Status Info')], StatusInfo)\n        return df\n\n    # ############# Stats ############# #\n\n    @property\n    def stats_defaults(self) -> tp.Kwargs:\n        \"\"\"Defaults for `Logs.stats`.\n\n        Merges `vectorbt.generic.stats_builder.StatsBuilderMixin.stats_defaults` and\n        `logs.stats` in `vectorbt._settings.settings`.\"\"\"\n        from vectorbt._settings import settings\n        logs_stats_cfg = settings['logs']['stats']\n\n        return merge_dicts(\n            StatsBuilderMixin.stats_defaults.__get__(self),\n            logs_stats_cfg\n        )\n\n    _metrics: tp.ClassVar[Config] = Config(\n        dict(\n            start=dict(\n                title='Start',\n                calc_func=lambda self: self.wrapper.index[0],\n                agg_func=None,\n                tags='wrapper'\n            ),\n            end=dict(\n                title='End',\n                calc_func=lambda self: self.wrapper.index[-1],\n                agg_func=None,\n                tags='wrapper'\n            ),\n            period=dict(\n                title='Period',\n                calc_func=lambda self: len(self.wrapper.index),\n                apply_to_timedelta=True,\n                agg_func=None,\n                tags='wrapper'\n            ),\n            total_records=dict(\n                title='Total Records',\n                calc_func='count',\n                tags='records'\n            ),\n            res_status_counts=dict(\n                title='Status Counts',\n                calc_func='res_status.value_counts',\n                incl_all_keys=True,\n                post_calc_func=lambda self, out, settings: to_dict(out, orient='index_series'),\n                tags=['logs', 'res_status', 
'value_counts']\n            ),\n            res_status_info_counts=dict(\n                title='Status Info Counts',\n                calc_func='res_status_info.value_counts',\n                post_calc_func=lambda self, out, settings: to_dict(out, orient='index_series'),\n                tags=['logs', 'res_status_info', 'value_counts']\n            )\n        ),\n        copy_kwargs=dict(copy_mode='deep')\n    )\n\n    @property\n    def metrics(self) -> Config:\n        return self._metrics\n\n\nLogs.override_metrics_doc(__pdoc__)\n","sub_path":"vectorbt/portfolio/logs.py","file_name":"logs.py","file_ext":"py","file_size_in_byte":8576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"460692763","text":"\"\"\"\r\n\r\nStep1: Import package smtplib\r\nStep2: Open connection (Gmail/Hotmail) (username, password)\r\nStep3: Open connection in a secure way (SSL/TLS)\r\nStep4: Send mail (username, sender address, message)\r\nStep5: Close connection\r\n\r\n\r\n\"\"\"\r\n\r\nimport smtplib\r\ncon = smtplib.SMTP('smtp.gmail.com',587)\r\ncon.starttls()\r\ncon.login(\"manishukla9lt@Gmail.com\",\"Qwerty@1008\")\r\nmessage = \" I hope you are learning new tips and Tricks in python ...\"\r\ncon.sendmail(\"manishukla9lt@Gmail.com\",\"parthshukla@9ledgepro.com\",msg = message)\r\nprint(\"mail Sent successfully ......\")\r\ncon.close()","sub_path":"Demo 14 Sending_mail.py","file_name":"Demo 14 Sending_mail.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"330731494","text":"from tornado import web\nfrom tornado import httpserver\nfrom tornado import ioloop\nimport time\n\n# request-handling module\nclass MainPageHandler(web.RequestHandler):\n    def get(self, *args, **kwargs):\n        #print('this is the GET request method')\n        #self.write('hello world.')\n        t = time.ctime()\n        self.render('index.html',time=t)\n    def post(self, *args, **kwargs):\n        pass\n\n# login module\nclass LoginHandler(web.RequestHandler):\n    def post(self, *args, **kwargs):\n        self.render('login.html')\n    def get(self, *args, **kwargs):\n        self.render('login.html')\n\n# routes and settings\napplication = web.Application([\n    (r\"/index\", MainPageHandler),\n    (r\"/login\", LoginHandler),\n])\n\nif __name__ == '__main__':\n    http_server = httpserver.HTTPServer(application)\n    print('http://127.0.0.1:8080')\n    http_server.listen(8080)\n    ioloop.IOLoop.current().start()\n","sub_path":"project/tornado框架/source/mytornado.py","file_name":"mytornado.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"588160799","text":"import numpy as np\nfrom sklearn.model_selection import train_test_split\n\ndef sin(x, T=100):\n    return np.sin(2.0 * np.pi * x / T)\n\ndef toy_problem(T=100, ampl=0.05):\n    x = np.arange(0, 2 * T + 1)\n    noise = ampl * np.random.uniform(low=-1.0, high=1.0, size=len(x))\n    return sin(x) + noise\n\ndef load(T=100, maxlen=25):\n    \"\"\"\n    Build a sine-wave dataset with added noise\n    @param T\n    @param maxlen length of a single time-series window\n    \"\"\"\n    # prepare the training data\n    f = toy_problem(T)\n\n    # split the full series into windows of length maxlen\n    length_of_sequences = 2 * T  # length of the whole time series\n\n    data = []\n    target = []\n\n    for i in range(0, length_of_sequences - maxlen + 1):\n        data.append(f[i: i + maxlen])\n        target.append(f[i + maxlen])\n\n    X = np.array(data).reshape(len(data), maxlen, 1)\n    Y = np.array(target).reshape(len(data), 1)\n\n    N_train = int(len(data) * 0.9)\n    N_validation = len(data) - N_train\n\n    X_train, X_validation, Y_train, Y_validation = train_test_split(X, Y, test_size=N_validation)\n    return X_train, X_validation, Y_train, 
Y_validation","sub_path":"tensorflow/RNN/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"41752054","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function\n\nfrom unittest import TestCase\n\nfrom polyaxon_schemas.api.log_handler import LogHandlerConfig\n\n\nclass TestLogHandlerConfig(TestCase):\n def test_log_handler_config(self):\n config_dict = {'dsn': 'https//foo:bar',\n 'environment': 'staging',\n 'tags': {}}\n config = LogHandlerConfig.from_dict(config_dict)\n assert config.to_dict() == config_dict\n","sub_path":"tests/test_api/test_log_handler.py","file_name":"test_log_handler.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"355123201","text":"import argparse\nimport logging\n\nfrom RhythmGame import *\nfrom config import GameConfig\nfrom utils import *\n\nimport cv2\n\n\nif __name__ == '__main__':\n config = GameConfig()\n params = {'diff': None, 'patterns': None, 'song': None, 'exit': None, 'menu': None, 'restart': None}\n\n logger = logging.getLogger('TfPoseEstimator-WebCam')\n logger.setLevel(logging.DEBUG)\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n formatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n parser = argparse.ArgumentParser(description='tf-pose-estimation realtime webcam')\n parser.add_argument('--camera', type=int, default=0)\n\n parser.add_argument('--resize', type=str, default='0x0',\n help='if provided, resize images before they are processed. default=0x0, Recommends : 432x368 or 656x368 or 1312x736 ')\n parser.add_argument('--resize-out-ratio', type=float, default=4.0,\n help='if provided, resize heatmaps before they are post-processed. default=1.0')\n\n parser.add_argument('--model', type=str, default='mobilenet_thin', help='cmu / mobilenet_thin / mobilenet_v2_large / mobilenet_v2_small')\n parser.add_argument('--show-process', type=bool, default=False,\n help='for debug purpose, if enabled, speed for inference is dropped.')\n \n parser.add_argument('--tensorrt', type=str, default=\"False\",\n help='for tensorrt process.')\n args = parser.parse_args()\n\n # load pattern and song, start game\n while True:\n try:\n main_menu(config, params)\n except:\n print(\"Failed to load main_menu. 메인 메뉴 불러오기 실패\")\n\n if params[\"exit\"] is True:\n cv2.destroyAllWindows()\n break\n\n try:\n print('load_pattern')\n load_pattern(config, params)\n print('load_song')\n load_song(config, params)\n print('start_game')\n start_game(config, params)\n except:\n print(\"Failed to load the game data. 
\")\n\n    cv2.waitKey(0)\n    cv2.destroyAllWindows()\n","sub_path":"run_webcam.py","file_name":"run_webcam.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"460508941","text":"'''\nUS Job Counts by Industry, 2006-2015\n===============================\nInteractive heat map showing how total US job change compares with\njob change by industry around the 2008 stock-market crash.\n'''\n\n\nimport pandas as pd\nimport altair as alt\nfrom datetime import datetime as dt\n\nus_employment = pd.read_csv(\"https://raw.githubusercontent.com/vega/vega-datasets/master/data/us-employment.csv\")\nus_employment[\"date\"] = pd.to_datetime(us_employment[\"month\"]) # convert date to datetime format and create a properly named \"date\" column\nus_employment = us_employment.drop(columns = [\"month\", \"nonfarm_change\"]) # drop nonfarm_change - I will recreate it for all job types\nus_employment = us_employment.rename(index=str, columns={\"nonfarm\": \"all_jobs\"})\nus_employment = us_employment.melt(id_vars = [\"date\"], value_vars = list(us_employment.columns[0:22]), var_name = \"job_type\", value_name = \"job_count\").reset_index()\n\n# create a change-by-job column to analyze change in specific job markets\nmonthly_change_by_job = []\nfor i in range(len(us_employment)-1):\n    monthly_change_by_job.append(us_employment.job_count[i+1] - us_employment.job_count[i])\nmonthly_change_by_job.append(None) # add a single value to the list of 359 values to match the df length\nus_employment[\"monthly_change_by_job\"] = monthly_change_by_job\n\n# drop the 2015-12-01 rows to work with the monthly change column\ndate_drop = pd.to_datetime(\"2015-12-01\")\nus_employment = us_employment[us_employment.date != date_drop]\n\n## Replace \"_\" so that job_types look nice on the heat map!\nus_employment.job_type = us_employment.job_type.str.replace(\"_\", \" \")\n\n# interactive heat map and bar plot, concatenated\n\nbrush = alt.selection_interval(encodings=['x'])\nopacity = alt.condition(brush, alt.value(0.9), alt.value(0.1))\n\n\nplot_all_jobs = alt.Chart(pd.DataFrame(us_employment[us_employment.job_type == \"all jobs\"])).mark_bar().encode(\n    alt.X(\"date:T\", title = \"Date\"),\n    alt.Y(\"monthly_change_by_job:Q\", title = \"Monthly Change in Jobs (thousands)\"),\n    alt.Color(\"monthly_change_by_job:Q\", scale=alt.Scale(scheme='yellowgreenblue')\n    ), opacity = opacity, tooltip=['date', 'monthly_change_by_job']).add_selection(\n    brush).properties(\n    title='Change in Total US Employment From 2006 to 2016', height = 200, width = 600)\n\njobs_heat_map = alt.Chart(us_employment[~us_employment.job_type.isin([\"all jobs monthly change\", \"all jobs\"])]).mark_rect().encode(\n    alt.X('date:O', title = \"Date\", timeUnit='utcyearmonth', axis=alt.Axis(tickCount = 5)),\n    alt.Y('job_type:N', title = \"Job Type\"),\n    color = alt.Color('monthly_change_by_job:Q', legend=alt.Legend(title=\"Job Count Change in Thousands\", orient = \"right\")),\n    tooltip=['job_type','date', 'monthly_change_by_job'], opacity = opacity\n).properties(title = \"Heat Map US Jobs Over Time\", width = 600, height = 450)\n\nplot_all_jobs & jobs_heat_map","sub_path":"altair/examples/Ilanas_us_employment_interactive_heat_map_and_barplot.py","file_name":"Ilanas_us_employment_interactive_heat_map_and_barplot.py","file_ext":"py","file_size_in_byte":2838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"567627134","text":"#!/usr/bin/env python3\n# _ 
____\n# _ _ _ __| |___ ___ __ _ _ __ | _ \\ _ __ ___\n# | | | | '__| / __|/ __/ _` | '_ \\ | |_) | '__/ _ \\\n# | |_| | | | \\__ \\ (_| (_| | | | | | __/| | | (_) |\n# \\__,_|_| |_|___/\\___\\__,_|_| |_| |_| |_| \\___/\n# \n# urlscan Pro Python API - (c) 2019 by Johannes Gilger - Web Security\n\nfrom gevent import monkey\nmonkey.patch_all()\nimport gevent\n\nimport argparse\nimport requests\nimport arrow\nimport logging\nimport sys\nfrom pydash import _\nimport pprint\npp = pprint.PrettyPrinter(indent=2)\n\nsession = requests.Session()\n\nparser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog=\"urlscan Pro API get\")\nparser.add_argument(\"action\", type=str, default=\"showbrands\", help=\"Action to perform: showbrands or showlatest\")\nparser.add_argument(\"--apikey\", type=str, default=\"\", help=\"API-Key for urlscan Pro\", required=True)\n\nparser.add_argument(\"--brand\", type=str, default=\"all\", help=\"Brand name\")\nparser.add_argument(\"--limit\", type=int, default=10, help=\"Return at most this many results\")\n\nparser.add_argument(\"--since\", type=str, default=\"7d\", help=\"Show phishing pages detected in the last minutes (m), hours (h), days (d), weeks (w), months (M)\")\nparser.add_argument(\"--query\", type=str, default=\"\", help=\"Query string\")\nargs = parser.parse_args()\n\nsession.headers.update({'api-key': args.apikey})\n\nquery = \"*\"\nif args.since:\n query = \"date%%3A>now-%s\" % args.since\nif args.query:\n query = \"%s AND (%s)\" % (query, args.query)\n\nif args.action == \"showbrands\":\n r = session.get(\"https://pro.urlscan.com/api/v1/pro/kits\")\n if not r.status_code == requests.codes.ok:\n logging.error(\"Error fetching brand definitions: %s\" % r.json())\n sys.exit(1)\n for kit in r.json()[\"kits\"]:\n print(\"=\"*80)\n print(\"%s - %s (%s)\\nKey: %s\\nWhitelisted domains: %s\" % (\n kit[\"name\"],\n _.head(_.get(kit, \"vertical\", [])),\n _.head(_.get(kit, \"country\", [])),\n kit[\"key\"],\n _.get(kit, \"terms.domains\", [])\n ))\n print(\"URL: https://pro.urlscan.com/search?filter=%%24phishing_%s\" % kit[\"key\"])\n print(\"API: https://pro.urlscan.com/api/v1/pro/search?filter=%%24phishing_%s\" % kit[\"key\"])\nelif args.action == \"showlatest\":\n r = session.get(\"https://pro.urlscan.com/api/v1/pro/search?q=%s&filter=$phishing_%s&size=%d\" % (query, args.brand, args.limit))\n print(\"\\nSearching for brand '%s' with query '%s' and limit '%d'\" % (args.brand, query, args.limit))\n print(\"Show in Pro: https://pro.urlscan.com/search?query=%s&filter=$phishing_%s\" % (query, args.brand))\n if not r.status_code == requests.codes.ok:\n logging.error(\"Error fetching brand definitions: %s\" % r.json())\n sys.exit(1)\n print(\"%d/%d results returned\\n\\n\" % (len(r.json()[\"results\"]), r.json()[\"total\"]))\n for res in r.json()[\"results\"]:\n print(\"=\"*80)\n print(\"Submitted URL: %s\" % _.get(res, \"task.url\"))\n print(\"Actual URL: %s\" % _.get(res, \"page.url\"))\n print(\"Submitted: %s via %s (Source: %s)\" % (_.get(res, \"task.time\"), _.get(res, \"task.method\"), _.get(res, \"task.source\")))\n print(\"Page IP: %s - %s (%s)\" % (_.get(res, \"page.ip\"), _.get(res, \"page.asn\"), _.get(res, \"page.asnname\")))\n print(\"Scan: https://urlscan.io/result/%s/\" % res[\"_id\"])\n print(\"API: https://urlscan.io/api/v1/result/%s/\" % res[\"_id\"])\n print(\"\")\n\n #pp.pprint(res)\nelse:\n logging.error(\"Unknown command '%s', quitting...\" % args.action)\n 
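# note added during editing: exit non-zero so shell callers can detect the unsupported action\n    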
sys.exit(1)\n","sub_path":"query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":3587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"433512817","text":"year=int(input(\"Enter year: (e.g., 2008): \"))\nmonth=int(input(\"Enter month: 1-12: \"))\ndayOfMonth=int(input(\"Enter the day of the month: 1-31: \"))\n\nq=dayOfMonth\nif month==1:\n    m=13\n    year=year-1\nelif month==2:\n    m=14\n    year=year-1\nelse:\n    m=month\n#m=month\nj=year//100\nk=year%100\nh=(q+26*(m+1)//10+k+k//4+j//4+5*j)%7\nday=\"\"\nif h==0:\n    day=\"Saturday\"\nelif h==1:\n    day=\"Sunday\"\nelif h==2:\n    day=\"Monday\"\nelif h==3:\n    day=\"Tuesday\"\nelif h==4:\n    day=\"Wednesday\"\nelif h==5:\n    day=\"Thursday\"\nelse:\n    day=\"Friday\"\nprint(\"Day of the week is \"+day)","sub_path":"HuangHuichao/exercise1/DayOfWeek4.21.py","file_name":"DayOfWeek4.21.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"553216904","text":"import os\nfrom datetime import datetime\nfrom scrapperize.spiders import MySpider\nfrom scrapy.crawler import CrawlerProcess\nfrom scrapy.utils.project import get_project_settings\n\n# TEST MODE True/False\nis_test_mode = False\n\n\npages = ['https://okdiario.com/']\n# source_language='ru' - if the site is in Russian\n# source_language='es' - if the site is in Spanish\nsource_language = \"es\"\n# destination_language - do not change\ndestination_language = \"en\"\n# self.is_paging_exists = False - if there is no pagination\n# self.is_paging_exists = True - if there is\nis_paging_exists = True\n\n# for an image - img::attr('src')\n# for a link - a::attr('href')\n# for another attribute - div[attribute='value']\n# category link a::attr('href')\ncategory_links = \".menu-item.menu-item-type-custom a::attr('href')\"\n# pagination link on a category page; skip if there is no pagination\ncategory_pages = \"ul.okdiario-secciones-menu-navegacion-ul li a::attr(href)\"\n# article link on category pages\ncategory_page_posts = \"section.content article header.article-header h2 a::attr('href')\"\n# post title\npost_title = \"h1.entry-title::text\"\n# post category\npost_category = \".topics ul li a::text\"\n# post image img::attr('src')\npost_image = \"section.content img::attr('src')\"\n# post content block, possibly div[attribute='value']\npost_content = \"div.entry-content p\"\n\ndef crawl_wp17():\n    current_time = datetime.now().strftime(\"%Y-%m-%d\")\n    FEED_URI = f\"wp17_{current_time}_{destination_language}.csv\"\n    settings = get_project_settings()\n    settings.update({\"FEED_URI\": FEED_URI})\n    if os.path.isfile(FEED_URI):\n        os.remove(FEED_URI)\n    crawler = CrawlerProcess(settings=settings)\n    crawler.crawl(\n        crawler_or_spidercls = MySpider,\n        pages=pages,\n        is_test_mode=is_test_mode,\n        is_paging_exists=is_paging_exists,\n        source_language=source_language,\n        destination_language=destination_language,\n        category_links=category_links,\n        category_pages=category_pages,\n        category_page_posts=category_page_posts,\n        post_title=post_title,\n        post_category=post_category,\n        post_image=post_image,\n        post_content=post_content,\n    )\n    crawler.start()\n","sub_path":"scrapperize/spiders/wp17okidario/scrape_posts.py","file_name":"scrape_posts.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"84615642","text":"import random\nclass 
RoadRoute(object):\n    \n    def __init__(self, routeId, edgeList):\n        self.routeId = routeId\n        self.edgeList = edgeList\n    \n    @classmethod\n    def generateRoutes(cls, adjacentMatrix):\n        numOfNodes = len(adjacentMatrix)\n        routesList = []\n        for i in xrange(numOfNodes):\n            routesList.append(RoadRoute(i, cls.generateARoute(adjacentMatrix, i, random.randint(1, numOfNodes))))\n        \n        return routesList\n    \n    @classmethod\n    def generateARoute(cls, adjacentMatrix, startNode, numOfSteps):\n        routeNodeList = []\n        routeNodeList.append(startNode)\n        stepCount = 0\n        numOfNodes = len(adjacentMatrix)\n        while(stepCount < numOfSteps):\n            foundAdjacent = False\n            for i in xrange(numOfNodes):\n                if adjacentMatrix[startNode][i] == 1:\n                    stepCount += 1\n                    startNode = i\n                    foundAdjacent = True\n                    routeNodeList.append(i)\n            if not foundAdjacent:\n                break\n        return routeNodeList\n    \n    def __str__(self):\n        edgeStr = \"\"\n        edgeNum = len(self.edgeList)\n        for i in xrange(edgeNum):\n            edgeStr += str(self.edgeList[i])\n            if i != edgeNum - 1:\n                edgeStr += \" \"\n        # note: the markup template was stripped from the original source;\n        # a SUMO-style <route> element is assumed here\n        return \"\"\"<route id=\"%s\" edges=\"%s\"/>\"\"\" % (self.routeId, edgeStr)\n    \n    ","sub_path":"models/road_route.py","file_name":"road_route.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"243299035","text":"from __future__ import unicode_literals, print_function\nimport sys\nimport logging\nimport re\nfrom libraries.aws_tools.s3_handler import S3Handler\nfrom libraries.aws_tools.dynamodb_handler import DynamoDBHandler\nfrom libraries.aws_tools.lambda_handler import LambdaHandler\nfrom libraries.gogs_tools.gogs_handler import GogsHandler\nfrom sqlalchemy import *\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\n\n\ndef resetable(cls):\n    cls._resetable_cache_ = cls.__dict__.copy()\n    return cls\n\n\ndef reset_class(cls):\n    cache = cls._resetable_cache_  # raises AttributeError on class without decorator\n    for key in [key for key in cls.__dict__ if key not in cache and key != '_resetable_cache_']:\n        delattr(cls, key)\n    for key, value in cache.items():  # reset the items to original values\n        try:\n            if key != '_resetable_cache_':\n                setattr(cls, key, value)\n        except AttributeError:\n            pass\n\n\ndef setup_logger(logger, level):\n    \"\"\"\n    Logging for the App, and turn off boto logging.\n    Set here so automatically ready for any logging calls\n    :param logger:\n    :param level:\n    :return:\n    \"\"\"\n    for h in logger.handlers:\n        logger.removeHandler(h)\n    sh = logging.StreamHandler(sys.stdout)\n    head = '%(asctime)s - %(levelname)s: %(message)s'\n    sh.setFormatter(logging.Formatter(head))\n    logger.addHandler(sh)\n    logger.setLevel(level)\n    # Change these loggers to only report errors:\n    logging.getLogger('boto3').setLevel(logging.ERROR)\n    logging.getLogger('botocore').setLevel(logging.ERROR)\n\n\n@resetable\nclass App(object):\n    \"\"\"\n    For all things used by this app, from DB connection to global handlers\n    \"\"\"\n    _resetable_cache_ = {}\n    name = 'tx-manager'\n\n    # Stage Variables, defaults\n    prefix = ''\n    api_url = 'https://api.door43.org'\n    pre_convert_bucket = 'tx-webhook-client'\n    cdn_bucket = 'cdn.door43.org'\n    door43_bucket = 'door43.org'\n    gogs_user_token = None\n    gogs_url = 'https://git.door43.org'\n    gogs_domain_name = 'git.door43.org'\n    gogs_ip_address = '127.0.0.1'\n    job_table_name = 'tx-job'\n    module_table_name = 'tx-module'\n    language_stats_table_name = 'language-stats'\n    linter_messaging_name = 'linter_complete'\n    db_protocol = 'mysql+pymysql'\n    
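# note added during editing: the values below are placeholder defaults; db_pass stays None and must be supplied (or a full db_connection_string given) before setup_db() will run\n    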
db_user = 'tx'\n db_pass = None\n db_end_point = 'd43-gogs.ccidwldijq9p.us-west-2.rds.amazonaws.com'\n db_port = '3306'\n db_name = 'tx'\n db_connection_string = None\n db_connection_string_params = 'charset=utf8mb4&use_unicode=0'\n\n # Prefixing vars\n # All variables that we change based on production, development and testing environments.\n prefixable_vars = ['api_url', 'pre_convert_bucket', 'cdn_bucket', 'door43_bucket', 'job_table_name',\n 'module_table_name', 'language_stats_table_name', 'linter_messaging_name',\n 'db_name', 'db_user']\n\n # DB related\n ModelBase = declarative_base() # To be used in all libraries/model classes as the parent class: App.ModelBase\n auto_setup_db = True\n manifest_table_name = 'manifests'\n db_echo = False # Whether or not to echo DB queries to the debug log. Useful for debugging. Set before setup_db()\n db_engine = None\n db = None\n echo = False\n\n # S3 and DynamoDB Handler related\n auto_setup_handlers = True\n cdn_s3_handler = None\n door43_s3_handler = None\n pre_convert_s3_handler = None\n job_db_handler = None\n module_db_handler = None\n language_stats_db_handler = None\n lambda_handler = None\n gogs_handler = None\n aws_access_key_id = None\n aws_secret_access_key = None\n aws_region_name = 'us-west-2'\n\n logger = logging.getLogger()\n setup_logger(logger, logging.DEBUG)\n\n def __init__(self, reset=True, **kwargs):\n \"\"\"\n Using init to set the class variables with App(var=value)\n :param kwargs:\n \"\"\"\n if reset:\n reset_class(App)\n\n if 'prefix' in kwargs and kwargs['prefix'] != App.prefix:\n App.prefix_vars(kwargs['prefix'])\n\n App.set_vars(**kwargs)\n\n if App.auto_setup_handlers:\n App.setup_handlers()\n\n if App.auto_setup_db and (App.db_connection_string or App.db_pass or App.db_protocol == 'sqlite'):\n App.setup_db(self.echo)\n\n @classmethod\n def prefix_vars(cls, prefix):\n \"\"\"\n Prefixes any variables in App.prefixable_variables. 
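        For example, with prefix 'dev-' the table name 'tx-job' becomes 'dev-tx-job'.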
This includes URLs\n :return:\n \"\"\"\n url_re = re.compile(r'^(https*://)') # Current prefix in URLs\n for var in App.prefixable_vars:\n value = getattr(App, var)\n if re.match(url_re, value):\n value = re.sub(url_re, r'\\1{0}'.format(prefix), value)\n else:\n value = prefix + value\n setattr(App, var, value)\n App.prefix = prefix\n\n @classmethod\n def set_vars(cls, **kwargs):\n for var, value in kwargs.iteritems():\n if hasattr(App, var):\n setattr(App, var, value)\n\n @classmethod\n def setup_handlers(cls):\n App.cdn_s3_handler = S3Handler(bucket_name=App.cdn_bucket,\n aws_access_key_id=App.aws_access_key_id,\n aws_secret_access_key=App.aws_secret_access_key,\n aws_region_name=App.aws_region_name)\n App.door43_s3_handler = S3Handler(bucket_name=App.door43_bucket,\n aws_access_key_id=App.aws_access_key_id,\n aws_secret_access_key=App.aws_secret_access_key,\n aws_region_name=App.aws_region_name)\n App.pre_convert_s3_handler = S3Handler(bucket_name=App.pre_convert_bucket,\n aws_access_key_id=App.aws_access_key_id,\n aws_secret_access_key=App.aws_secret_access_key,\n aws_region_name=App.aws_region_name)\n App.job_db_handler = DynamoDBHandler(table_name=App.job_table_name,\n aws_access_key_id=App.aws_access_key_id,\n aws_secret_access_key=App.aws_secret_access_key,\n aws_region_name=App.aws_region_name)\n App.module_db_handler = DynamoDBHandler(table_name=App.module_table_name,\n aws_access_key_id=App.aws_access_key_id,\n aws_secret_access_key=App.aws_secret_access_key,\n aws_region_name=App.aws_region_name)\n App.language_stats_db_handler = DynamoDBHandler(table_name=App.language_stats_table_name,\n aws_access_key_id=App.aws_access_key_id,\n aws_secret_access_key=App.aws_secret_access_key,\n aws_region_name=App.aws_region_name)\n App.lambda_handler = LambdaHandler(aws_access_key_id=App.aws_access_key_id,\n aws_secret_access_key=App.aws_secret_access_key,\n aws_region_name=App.aws_region_name)\n App.gogs_handler = GogsHandler(gogs_url=App.gogs_url)\n\n @classmethod\n def setup_db(cls, echo=False):\n \"\"\"\n :param bool echo:\n \"\"\"\n if not App.db_connection_string:\n App.db_connection_string = App.construct_connection_string()\n App.db_engine = create_engine(App.db_connection_string, echo=echo)\n session = sessionmaker(bind=App.db_engine)()\n App.db = session\n\n from libraries.models.manifest import TxManifest\n TxManifest.__table__.name = App.manifest_table_name\n App.create_tables([TxManifest.__table__])\n return session\n\n @classmethod\n def create_tables(cls, tables=None):\n App.ModelBase.metadata.create_all(App.db_engine, tables=tables)\n\n @classmethod\n def construct_connection_string(cls):\n db_connection_string = App.db_protocol+'://'\n if App.db_user:\n db_connection_string += App.db_user\n if App.db_pass:\n db_connection_string += ':'+App.db_pass\n if App.db_end_point:\n db_connection_string += '@'\n if App.db_end_point:\n db_connection_string += App.db_end_point\n if App.db_port:\n db_connection_string += ':'+App.db_port\n if App.db_name:\n db_connection_string += '/'+App.db_name\n if App.db_connection_string_params:\n db_connection_string += '?'+App.db_connection_string_params\n return db_connection_string\n","sub_path":"libraries/app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"650681029","text":"import os\nimport pickle\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\n\nimport MyPackageCommon.Constants as 
cst\n\n\nclass NNParams:\n targetLabels = [\n cst.labels.learnTarget,\n cst.labels.learnTargetNot\n ]\n\n resize = 500\n colorConvertMode = \"L\" # \"RGB\"\n imageDepth = 2 # by colorConvertMode\n imageSize = resize * resize * imageDepth\n\n targetLabelSize = len(targetLabels)\n\n learnRate = 0.05\n epochs = 100\n\n batchSize = 1\n\n targetAccuracy = 0.99\n\n\nclass CNN(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 10 ,10) # param 1 is need \"1\".\n self.pool = nn.MaxPool2d(10,10)\n self.shapeConv1 = None\n self.conv2 = nn.Conv2d(10,10,10)\n self.fc1=None\n self.fc2 = nn.Linear(50, 20)\n self.fc3 = nn.Linear(20, 2)\n\n self.loadStructure()\n\n def loadStructure(self):\n if os.path.exists(cst.savePath.structure):\n with open(cst.savePath.structure, \"rb\") as f:\n modelElems = pickle.load(f)\n self.fc1 = modelElems[0]\n self.setup = True\n else:\n self.setup = False\n\n def forward(self, x):\n x = self.pool(F.relu(self.conv1(x)))\n # x = self.pool(F.relu(self.conv2(x)))\n x = torch.flatten(x, 1) # flatten all dimensions except batch\n\n if not self.setup:\n self.fc1 = nn.Linear(x.shape[1], 50)\n with open(cst.savePath.structure, \"wb\") as f:\n modelElems = [self.fc1]\n pickle.dump(modelElems, f)\n print(\"network auto setup\")\n print(self)\n self.setup = True\n\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n\n\nclass FFNN(nn.Module):\n def __init__(self):\n super(FFNN, self).__init__()\n self.flatten = nn.Flatten()\n self.linear_relu_stack = nn.Sequential(\n nn.Linear(NNParams.imageSize, 10),\n nn.Sigmoid(),\n nn.Linear(10, NNParams.targetLabelSize),\n )\n\n def forward(self, x):\n x = self.flatten(x)\n logits = self.linear_relu_stack(x)\n return logits\n\n\nclass CnnSample(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 6, 5)\n self.pool = nn.MaxPool2d(2, 2)\n self.conv2 = nn.Conv2d(6, 16, 5)\n self.fc1 = nn.Linear(16 * 5 * 5, 120)\n self.fc2 = nn.Linear(120, 84)\n self.fc3 = nn.Linear(84, 10)\n\n def forward(self, x):\n x = self.pool(F.relu(self.conv1(x)))\n x = self.pool(F.relu(self.conv2(x)))\n x = torch.flatten(x, 1) # flatten all dimensions except batch\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n\n\nif __name__ == '__main__':\n net = CNN()\n","sub_path":"MyPackageNetwork/NetWork.py","file_name":"NetWork.py","file_ext":"py","file_size_in_byte":2881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"264129075","text":"import sys, os, random\nfrom nltk.tokenize import sent_tokenize, word_tokenize\nfrom textblob import TextBlob\n\n# Function utils\ndef starts_with_vowel(string):\n return string[0] in 'aeiou'\n\n# User's chat line\ndef prompt(client_name = False):\n\n if(client_name == False):\n client_name = \"Bot\"\n \n sys.stdout.write('<' + client_name + '>')\n sys.stdout.flush()\n\n# Responce constants\nGREETING_KEYWORDS = (\"hello\", \"hi\", \"greetings\", \"sup\", \"what's up\",)\nGREETING_RESPONSES = [\"'sup bro\", \"hey\", \"*nods*\", \"hey you got my snap?\"]\n\n# Process user's input\ndef find_proper_noun(sentence): # Find \"Sustantivo\"\n tags = sentence.tags\n proper_noun = None\n\n for word, part_of_speech in tags:\n if part_of_speech.startswith('NNP'):\n proper_noun = word\n\n return proper_noun\n\n\ndef find_verb(sentence):\n tags = sentence.tags\n verb = None\n pos = None # What kind of verb is\n\n for word, part_of_speech in tags:\n if part_of_speech.startswith('VB'):\n 
verb = word\n pos = part_of_speech\n\n return verb, pos\n\ndef find_adjective(sentence):\n tags = sentence.tags\n adjective = None\n\n for word, part_of_speech in tags:\n if part_of_speech == 'JJ':\n adjective = word\n return adjective\n\ndef find_noun(sentence):\n tags = sentence.tags\n noun = None\n\n for word, part_of_speech in tags:\n if part_of_speech == 'PRP':\n noun = word\n return noun\n \ndef find_pronoun(sentence):\n tags = sentence.tags\n pronoun = None\n\n for word, part_of_speech in tags:\n if part_of_speech == 'PRP' and word.lower() == 'you': # The user is refering to bot\n pronoun = 'I'\n elif part_of_speech == 'PRP' and word == 'I': # The user mentioned itself\n pronoun = 'You'\n return pronoun\n\ndef find_candidate_parts_of_speech(sentence): # Process user's input\n text = TextBlob(sentence) # Preprocess incoming text\n\n pronoun = None\n noun = None\n adjective = None\n verb = None\n proper_noun = None\n\n for sentence in text.sentences:\n pronoun = find_pronoun(sentence)\n noun = find_noun(sentence)\n adjective = find_adjective(sentence)\n verb, verb_pos = find_verb(sentence)\n proper_noun = find_proper_noun(sentence)\n\n return pronoun, noun, adjective, verb, proper_noun\n\n# Assemble bot response\n# Template for responses that include a direct noun which is indefinite/uncountable\nSELF_VERBS_WITH_NOUN_CAPS_PLURAL = [\n \"My last startup totally crushed the {noun} vertical\",\n \"Were you aware I was a serial entrepreneur in the {noun} sector?\",\n \"My startup is Uber for {noun}\",\n \"I really don't consider myself an expert on {noun}\",\n]\n\nSELF_VERBS_WITH_NOUN_LOWER = [\n \"Yeah but I know a lot about {noun}\",\n \"My bros always ask me about {noun}\",\n]\n\nSELF_VERBS_WITH_ADJECTIVE = [\n \"I'm personally building the {adjective} Economy\",\n \"I consider myself to be a {adjective}preneur\",\n]\n\ndef check_for_comment_about_bot(pronoun, noun, adjective):\n resp = None\n if pronoun == \"I\" and (noun or adjective):\n if noun:\n if random.choice((True, False)):\n resp = random.choice(SELF_VERBS_WITH_NOUN_CAPS_PLURAL).format(**{ 'noun' : noun.pluralize().capitalize() })\n else:\n resp = random.choice(SELF_VERBS_WITH_NOUN_LOWER).format(**{ 'noun' : noun })\n else:\n resp = random.choice(SELF_VERBS_WITH_ADJECTIVE).format(**{ 'adjective' : adjective })\n return resp\n\n# Construct bot's response based of the user's input\ndef construct_response(pronoun, noun, verb):\n resp = []\n\n if(pronoun is None or noun is None or verb is None):\n return \"Sorry i couldn't understand.\"\n\n if pronoun:\n resp.append(pronoun)\n \n if verb:\n verb_word = verb\n if verb_word in ('be', 'am', 'is', \"'m\"):\n if pronoun.lower() == 'you':\n resp.append(\"aren't really\")\n else:\n resp.append(verb_word)\n\n if noun:\n pronoun = \"an\" if starts_with_vowel(noun) else \"a\"\n resp.append(pronoun + \" \" + noun)\n \n resp.append(random.choice((\"tho\", \"bro\", \"lol\", \"bruh\", \"smh\",\"\")))\n\n return \" \".join(resp)\n\n# Simple greeting check\ndef check_for_greeting(sentence):\n \"\"\" If any word in the input sentence is a greeting, return a greeting \"\"\"\n words = word_tokenize(sentence)\n for word in words:\n if word in GREETING_KEYWORDS:\n return random.choice(GREETING_RESPONSES)\n\ndef main():\n prompt('You')\n while 1:\n sentence = sys.stdin.readline()\n if(sentence):\n pronoun, noun, adjective, verb, proper_noun = find_candidate_parts_of_speech(sentence)\n bot_response = check_for_greeting(sentence) or check_for_comment_about_bot(pronoun, noun, adjective) or 
construct_response(pronoun, noun, verb)\n prompt()\n print(bot_response)\n prompt('You')\n\nif __name__ == \"__main__\":\n try:\n main()\n except KeyboardInterrupt: # Catch ctrl + c execption\n pass\n finally:\n os.system('cls||clear')\n print(\"Thanks for trying me out. Have a nice day ;)\")\n\n\n# pronoun, noun, adjective, verb, proper_noun = find_candidate_parts_of_speech(sentence)\n# print(\"Pronoun=%s\" % pronoun, \"Noun=%s\" % noun, \"Adjective=%s\" % adjective, \"Verb=%s\" % verb, \"ProperNoun=%s\" % proper_noun)","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":5428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"238351793","text":"matriz = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\n\nfor c1 in range (0, 3) :\n for c2 in range(0, 3) :\n matriz[c1][c2] = int(input((f'Digite um valor [{c1}, {c2}]: ')))\n\nprint('=' * 40)\n\nfor c1 in range(0, 3) :\n for c2 in range(0, 3) :\n print(f'[{matriz[c1][c2]:^5}]', end= '')\n print()\nprint(matriz)","sub_path":"PhythonExercicios/ex087.py","file_name":"ex087.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"432987017","text":"# coding: utf-8\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nfrom builtins import * # noqa\nfrom functools import update_wrapper, wraps\nfrom itertools import count, cycle\n\nfrom .chars import DLQUO, LAQUO, LDQUO, LSQUO, RAQUO, RDQUO, RSQUO\nfrom .utils import re_compile\n\n__all__ = ('EscapePhrases', 'EscapeHtml', 'Quotes', 'Expressions')\n\n\ndef tail_processor(text, *args, **kwargs):\n return text\n\n\nclass BaseProcessor(object):\n \"\"\"\n Processors are the core of Typus. 
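    Each processor wraps the callable handed to it (via ``__radd__``/``__call__``), so a chain of processors composes around the final text-processing function.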
See subclasses for examples.\n \"\"\"\n\n def __init__(self, typus):\n # Makes possible to decorate processor\n update_wrapper(self, self.__class__, updated=())\n\n # Stores Typus to access it's configuration\n self.typus = typus\n\n def __call__(self, typus):\n raise NotImplementedError\n\n def __radd__(self, other):\n return self(other or tail_processor)\n\n\nclass EscapePhrases(BaseProcessor):\n \"\"\"\n Escapes phrases which should never be processed.\n\n >>> en_typus('Typus turns `(c)` into \"(c)\"', escape_phrases=['`(c)`'])\n 'Typus turns `(c)` into “©”'\n\n Also there is a little helper :func:`typus.utils.splinter` which should\n help you to split string into the phrases.\n \"\"\"\n\n placeholder = '{{#phrase{0}#}}'\n\n def __call__(self, func):\n @wraps(self, updated=())\n def inner(text, *args, **kwargs):\n storage = []\n counter = count()\n escaped = self._save_values(text, storage, counter, **kwargs)\n\n # Runs typus\n processed = func(escaped, *args, **kwargs)\n if not storage:\n return processed\n\n restored = self._restore_values(processed, storage, **kwargs)\n return restored\n return inner\n\n def _save_values(self, text, storage, counter, escape_phrases=(), **kwargs):\n for phrase in escape_phrases:\n if not phrase.strip():\n continue\n key = self.placeholder.format(next(counter))\n text = text.replace(phrase, key)\n storage.append((key, phrase))\n return text\n\n def _restore_values(self, text, storage, **kwargs):\n \"\"\"\n Puts data into the text in reversed order.\n It's important to loop over and restore text step by step\n because some 'stored' chunks may contain keys to other ones.\n \"\"\"\n for key, value in reversed(storage):\n text = text.replace(key, value)\n return text\n\n\nclass EscapeHtml(EscapePhrases):\n \"\"\"\n Extracts html tags and puts them back after.\n\n >>> en_typus('Typus turns (c) into \"(c)\"')\n 'Typus turns (c) into “©”'\n\n .. caution::\n Doesn't support nested ```` tags.\n \"\"\"\n\n placeholder = '{{#html{0}#}}'\n skiptags = 'head|iframe|pre|code|script|style|video|audio|canvas'\n patterns = (\n re_compile(r'(<)({0})(.*?>.*?)'.format(skiptags)),\n # Doctype, xml, closing tag, any tag\n re_compile(r'(<[\\!\\?/]?[a-z]+.*?>)'),\n # Comments\n re_compile(r'(<\\!\\-\\-.*?\\-\\->)'),\n )\n\n def _save_values(self, text, storage, counter, **kwargs):\n for pattern in self.patterns:\n text = pattern.sub(self._replace(storage, counter), text)\n return text\n\n def _replace(self, storage, counter):\n def inner(match):\n key = self.placeholder.format(next(counter))\n html = ''.join(match.groups())\n storage.append((key, html))\n return key\n return inner\n\n\nclass Quotes(BaseProcessor):\n \"\"\"\n Replaces regular quotes with typographic ones.\n Supports any level nesting, but doesn't work well with minutes ``1'``\n and inches ``1\"`` within the quotes, that kind of cases are ignored.\n Use it with :class:`typus.mixins.RuQuotes` or\n :class:`typus.mixins.EnQuotes` or provide Typus attributes\n ``loq, roq, leq, req`` with custom quotes.\n\n >>> en_typus('Say \"what\" again!')\n 'Say “what” again!'\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(Quotes, self).__init__(*args, **kwargs)\n\n # Odd and even levels: left, right\n self.loq, self.roq = self.typus.loq, self.typus.roq\n self.leq, self.req = self.typus.leq, self.typus.req\n\n # Pairs of odd and even quotes. 
Already *switched* in one dimension.\n # See :meth:`_switch_nested` for more help.\n self.switch = (self.loq + self.req, self.leq + self.roq)\n\n # Replaces all quotes with `'`\n quotes = ''.join((LSQUO, RSQUO, LDQUO, RDQUO, DLQUO, LAQUO, RAQUO))\n self.re_normalize = re_compile(r'[{0}]'.format(quotes))\n\n # Matches nested quotes (with no quotes within)\n # and replaces with odd level quotes\n self.re_normal = re_compile(\n # No words before\n r'(?>> from typus.core import TypusCore\n >>> from typus.processors import Expressions\n ...\n >>> class MyExpressionsMixin:\n ... def expr_bold_price(self):\n ... expr = (\n ... (r'(\\$\\d+)', r'\\1'),\n ... )\n ... return expr\n ...\n >>> class MyTypus(MyExpressionsMixin, TypusCore):\n ... expressions = ('bold_price', ) # no prefix `expr_`!\n ... processors = (Expressions, )\n ...\n >>> my_typus = MyTypus() # `expr_bold_price` is compiled and stored\n >>> my_typus('Get now just for $1000!')\n 'Get now just for $1000!'\n\n .. note::\n *Expression* is a pair of regex and replace strings. Regex strings are\n compiled with :func:`typus.utils.re_compile` with a bunch of flags:\n unicode, case-insensitive, etc. If that doesn't suit for you pass your\n own flags as a third member of the tuple: ``(regex, replace, re.I)``.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(Expressions, self).__init__(*args, **kwargs)\n\n # Compiles expressions\n self.compiled_exprs = [\n (re_compile(*group[::2]), group[1])\n for name in self.typus.expressions\n for group in getattr(self.typus, 'expr_' + name)()\n ]\n\n def __call__(self, func):\n @wraps(self, updated=())\n def inner(text, *args, **kwargs):\n # Applies expressions\n for expr, repl in self.compiled_exprs:\n text = expr.sub(repl, text)\n text = func(text, *args, **kwargs)\n return text\n return inner\n","sub_path":"typus/processors.py","file_name":"processors.py","file_ext":"py","file_size_in_byte":9026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"248717657","text":"global ciudades\r\nglobal inventario\r\nglobal supermercado\r\nglobal usuarios\r\nciudades = open('archivos\\\\ciudades.txt')\r\ninventario = open('archivos\\\\Inventario.txt')\r\nsupermercado = open('archivos\\\\Supermercado.txt')\r\nusuarios = open('archivos\\\\Usuarios.txt')\r\n###LOL\r\ndef principal():\r\n listaciudades = []\r\n temp = []\r\n facturas = []\r\n global ciudades1\r\n ciudades1 = crearlista(ciudades, 0, 1, listaciudades)\r\n for i in ciudades1:\r\n temp = i\r\n listaciudades += [temp[0]]\r\n global supermercado1\r\n supermercado1 = crearlista(supermercado, 1, 3, listaciudades)\r\n global inventario1\r\n inventario1 = crearlista(inventario, 0, 2, listaciudades)\r\n global usuarios1\r\n usuarios1 = crearlista(usuarios, 1, 4, listaciudades)\r\n menu(facturas, ciudades1, inventario1, supermercado1, usuarios1)\r\n \r\ndef menu(fac,a,b,c,d):\r\n print(\"------------\")\r\n print(\"1. Ingresar\")\r\n print(\"------------\")\r\n print(\"2. Registrarse\")\r\n print(\"------------\")\r\n print(\"3. 
Salir\")\r\n eleccion = 0\r\n eleccion = int(input(\"Seleccione una opción:\"))\r\n print(eleccion)\r\n if eleccion == 1:\r\n print(\"ingrese su usuario\")\r\n usu1= \"\"\r\n cont1 = \"\"\r\n lol = 1\r\n usu1 = str(input())\r\n if lol == 1:\r\n for i in d:\r\n if usu1 == i[2]:\r\n print(\"ingrese su codigo de usuario, \", usu1)\r\n cont1 = input()\r\n if cont1 == i[1]:\r\n print(\"Bienvenido\")\r\n if int(i[4]) == 0:\r\n menu1(fac,a,b,c,d, i[1])\r\n elif int(i[4]) == 1 or int(i[4]) == 2:\r\n menu2(fac,a,b,c,d, i[1])\r\n break\r\n else:\r\n print(\"codigo incorrecta\")\r\n menu(fac,a,b,c,d)\r\n else:\r\n lol = 0\r\n if lol == 0:\r\n print(\"Ese usuario no existe\")\r\n print(usu1)\r\n menu(fac,a,b,c,d)\r\n elif eleccion == 2:\r\n xx = newuser(fac,a,b,c,d)\r\n if xx != []:\r\n d = d + [xx]\r\n print(\"Usuario agregado con exito\")\r\n input()\r\n menu(fac,a,b,c,d)\r\n else:\r\n print(\"Error en los datos\")\r\n menu(fac,a,b,c,d)\r\n elif eleccion == 3:\r\n archi = open(\"TOTALES.txt\", \"w\")\r\n archi.write(str(fac))\r\n archi.write(str(a))\r\n archi.write(str(b))\r\n archi.write(str(c))\r\n archi.write(str(d))\r\n archi.close()\r\n exit()\r\n else:\r\n print(\"Ingrese una opción existente\")\r\n menu(fac,a,b,c,d)\r\ndef newuser(fac,a,b,c,d):\r\n newser = \"\"\r\n newpass = \"\"\r\n city = \"\"\r\n cedula = \"\"\r\n linea = []\r\n print(\"Ingrese su ciudad de procedencia\")\r\n city = str(input())\r\n for i in a:\r\n if city in i:\r\n print(\"Ingrese su código\")\r\n cedula = str(input())\r\n print(\"Ingrese su nombre de usuario de preferencia\")\r\n newser = str(input())\r\n print(\"ingrese un número telefónico\")\r\n newpass = str(input())\r\n if newpass != \"\" and newser != \"\":\r\n linea = [city] + [cedula] + [newser] + [newpass] + [\"0\"]\r\n return linea\r\ndef menu1(fac,a,b,c,d,e):\r\n print(\"menu de Cliente\")\r\n print(\"------------\")\r\n print(\"1. Hacer pedido\")\r\n print(\"------------\")\r\n print(\"2. Salir\")\r\n print(\"------------\")\r\n eleccion = 0\r\n eleccion = int(input(\"Seleccione una opción:\"))\r\n print(eleccion)\r\n if eleccion == 1:\r\n hacerpedido(fac,a,b,c,d,e)\r\n elif eleccion == 2:\r\n menu(fac,a,b,c,d)\r\n else:\r\n print(\"Ingrese una opción existente\")\r\n menu1(fac,a,b,c,d,e)\r\ndef menu2(fac,a,b,c,d,e):\r\n print(\"menu de Funcionarios\")\r\n print(\"------------\")\r\n print(\"1. ingresar un producto\")\r\n print(\"------------\")\r\n print(\"2. eliminar productos\")\r\n print(\"------------\")\r\n print(\"3. consultar precios\")\r\n print(\"------------\")\r\n print(\"4. consultar ultimo descuento\")\r\n print(\"------------\")\r\n print(\"5. consultar productos\")\r\n print(\"------------\")\r\n print(\"6. Hacer pedido\")\r\n print(\"------------\")\r\n print(\"7. 
Salir\")\r\n print(\"------------\")\r\n eleccion = 0\r\n eleccion = int(input(\"Seleccione una opción:\"))\r\n print(eleccion)\r\n if eleccion == 1:\r\n meterproducto(fac,a,b,c,d,e)\r\n elif eleccion == 2:\r\n delproductos(fac,a,b,c,d,e)\r\n elif eleccion == 3:\r\n consulprecios(fac,a,b,c,d,e)\r\n elif eleccion == 4:\r\n consuldescuentos(fac,a,b,c,d,e)\r\n elif eleccion == 5:\r\n consulproductos(fac,a,b,c,d,e)\r\n elif eleccion == 7:\r\n menu(fac,a,b,c,d)\r\n elif eleccion == 6:\r\n hacerpedido(fac,a,b,c,d,e)\r\n else:\r\n print(\"Ingrese una opción existente\")\r\n menu2(fac,a,b,c,d,e)\r\ndef meterproducto(fac,a,b,c,d,e):\r\n print(\"Ingresar productos\")\r\n print(\"-----------------\")\r\n print(\"Ingrese el supermercado\")\r\n supr = input()\r\n suprr = False\r\n print(\"\")\r\n print(\"Ingrese el codigo de producto\")\r\n cod = input()\r\n flag = 0\r\n print(c)\r\n for i in c:\r\n if int(supr) == int(i[1]):\r\n suprr = True\r\n else:\r\n flag = 2\r\n if suprr == True:\r\n for i in b:\r\n if cod != i[1]:\r\n flag = 0\r\n break\r\n else:\r\n flag = 1\r\n if flag == 0:\r\n print(\"Ingrese la cantidad\")\r\n cant = input()\r\n print(\"Ingrese el precio\")\r\n prec = input()\r\n prodnuevo = [supr] + [cod] + [cant] + [prec]\r\n b = b + [prodnuevo]\r\n print(\"producto agregado\")\r\n input()\r\n menu2(fac,a,b,c,d,e)\r\n elif flag == 1:\r\n print(\"Ese código ya existe en el supermercado\")\r\n input()\r\n meterproducto(fac,a,b,c,d,e)\r\n elif flag == 2:\r\n print(\"Supermercado incorrecto\")\r\n input()\r\n meterproducto(fac,a,b,c,d,e)\r\n\r\ndef delproductos(fac,a,b,c,d,e):\r\n print(\"Borrar productos\")\r\n cont = 0\r\n for i in b:\r\n cont = cont + 1\r\n print(cont, i)\r\n print(\"Elija el producto que desea borrar\")\r\n prod = input()\r\n for i in b:\r\n if prod in i:\r\n del b[b.index(i)]\r\n print(\"El producto ha sido borrado\")\r\n break\r\n input()\r\n menu2(fac,a,b,c,d,e)\r\n \r\ndef consulprecios(fac,a,b,c,d,e):\r\n print(\"Consultar precios\")\r\n print(\"Elija el producto que desea ver\")\r\n prod = input()\r\n for i in b:\r\n if i != []:\r\n if int(prod) == int(i[1]):\r\n print(\"El precio es \", i[3])\r\n input()\r\n menu2(fac,a,b,c,d,e)\r\n \r\n\r\ndef consuldescuentos(fac,a,b,c,d,e):\r\n print(\"consultar descuentos\")\r\n print(\"Ingrese el codigo de usuario\")\r\n cod = input()\r\n print(\"\")\r\n archivo = open(cod + \".txt\", \"r\")\r\n linea = archivo.readline()\r\n lineas = []\r\n while linea != \"\":\r\n lineas = lineas + [linea]\r\n linea = archivo.readline()\r\n print(lineas[len(lineas)-2])\r\n archivo.close()\r\n input()\r\n menu2(fac,a,b,c,d,e)\r\n \r\ndef consulproductos(fac,a,b,c,d,e):\r\n print(\"consultar productos\")\r\n cont = 0\r\n for i in b:\r\n cont = cont + 1\r\n print(cont, i[1])\r\n print(\"Elija el producto que desea ver\")\r\n prod = input()\r\n for i in b:\r\n if prod in i:\r\n produ = (b[b.index(i)])\r\n break\r\n print(\"Supermercado: \", produ[0])\r\n print(\"Codigo: \", produ[1])\r\n print(\"Cantidad: \", produ[2])\r\n print(\"Precio: \", produ[3])\r\n input()\r\n menu2(fac,a,b,c,d,e)\r\n \r\ndef hacerpedido(fac,a,b,c,d, ced):\r\n print(\"---------\")\r\n print(\"Hacer pedido\")\r\n print(\"---------\")\r\n m = 1\r\n tempcod = \"\"\r\n usuario = []\r\n cod = 0\r\n eleccion = \"si\"\r\n total = 0\r\n totaldesc = 0\r\n totalfinal = 0\r\n cant = 0\r\n flag = 0\r\n for i in d:\r\n if ced in i:\r\n usuario = i\r\n listafactura = []\r\n listafactura = listafactura + [usuario[2]]\r\n listafactura = listafactura + [ced]\r\n listafactura = 
listafactura + [usuario[3]]\r\n listafactura = listafactura + [usuario[0]]\r\n codsuper = \"\"\r\n xox = 1\r\n for i in c:\r\n if i[0] == usuario[0]:\r\n codsuper = i[1]\r\n listafactura = listafactura + [codsuper]\r\n tempinventario = b\r\n while m == 1:\r\n print(\"ingrese el codigo de su producto\")\r\n tempcod = input()\r\n for i in range(0,len(b)):\r\n linea = b[i]\r\n cod = b.index(linea)\r\n if codsuper == linea[0] and tempcod == linea[1]:\r\n while xox == 1:\r\n print(\"Ingrese la cantidad\")\r\n cant = int(input())\r\n if cant > int(linea[2]):\r\n print(\"Disculpe no tenemos esa cantidad\")\r\n else:\r\n xox = 0\r\n flag = 0\r\n total = total + (int(linea[3]) * cant)\r\n linea[2] = str(int(linea[2]) - cant)\r\n \r\n listafactura = listafactura + [linea[1]]\r\n listafactura = listafactura + [linea[3]]\r\n listafactura = listafactura + [str(cant)]\r\n xox = 1\r\n b[i] = linea\r\n break\r\n elif tempcod == linea[1]:\r\n while xox == 1:\r\n print(\"Ingrese la cantidad\")\r\n cant = int(input())\r\n if cant > int(linea[2]):\r\n print(\"Disculpe no tenemos esa cantidad\")\r\n else:\r\n xox = 0\r\n flag = 1\r\n total = total + (int(linea[3]) * cant)\r\n linea[2] = str(int(linea[2]) - cant)\r\n listafactura = listafactura + [linea[1]]\r\n listafactura = listafactura + [linea[3]]\r\n listafactura = listafactura + [str(cant)]\r\n xox = 1\r\n b[i] = linea\r\n break\r\n else:\r\n flag = 2\r\n print(\"Desea ingresar más productos? si/no\")\r\n eleccion = input()\r\n if eleccion == \"si\":\r\n m = 1\r\n elif eleccion == \"no\":\r\n m = 0\r\n n = 1\r\n while n == 1:\r\n print(\"---------------------\")\r\n print(\"Seleccione una opción\")\r\n print(\"---------------------\")\r\n print(\"1. hacer cambios\")\r\n print(\"2. Facturar\")\r\n print(\"---------------------\")\r\n opcion = input()\r\n if opcion == \"1\":\r\n for i in range(5, len(listafactura), 3):\r\n print(\"-- \" + listafactura[i])\r\n print(\"Seleccione el producto que desea cambiar\")\r\n codigo = input()\r\n for i in range(5, len(listafactura), 3):\r\n if int(codigo) == int(listafactura[i]):\r\n for e in b:\r\n if int(codigo) == int(e[1]):\r\n listi = b.pop(b.index(e))\r\n print(\"Ingrese la cantidad a eliminar (Menor que \" + listafactura[i + 2] + \")\")\r\n cant = input()\r\n if int(cant) <= int(listi[2]):\r\n listi[2] = str(int(listi[2]) + int(cant))\r\n listafactura[i + 2] = str(int(listafactura[i + 2]) - int(cant))\r\n total = total - (int(listi[3]) * int(cant))\r\n else:\r\n print(\"bump\")\r\n break\r\n else:\r\n print(\"bump2\")\r\n b = b + [listi]\r\n elif opcion == \"2\":\r\n if total >= 5000 and total <= 10000:\r\n totaldesc = total*5/100\r\n totalfinal = total - totaldesc\r\n elif total >= 10001 and total <= 50000:\r\n totaldesc = total*7/100\r\n totalfinal = total - totaldesc\r\n elif total >= 50001:\r\n totaldesc = total*10/100\r\n totalfinal = total - totaldesc\r\n else:\r\n totaldesc = 0\r\n totalfinal = total - totaldesc\r\n listafactura = listafactura + [str(total)]\r\n listafactura = listafactura + [str(totaldesc)]\r\n listafactura = listafactura + [str(totalfinal)]\r\n fac = fac + [listafactura]\r\n f = open(str(int(ced)) + \".txt\", \"w\")\r\n f.write(\"Factura\\n\")\r\n f.write(\"---------\\n\")\r\n f.write(listafactura[1])\r\n f.write(\"\\n\")\r\n f.write(\"\\n\")\r\n f.write(\"Nombre: \" + listafactura[0])\r\n f.write(\"\\n\")\r\n f.write(\"\\n\")\r\n f.write(\"teléfono: \" + listafactura[2])\r\n f.write(\"\\n\")\r\n f.write(\"\\n\")\r\n f.write(\"Ciudad: \" + listafactura[3])\r\n f.write(\"\\n\")\r\n 
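            # after the header fields, the lines below write the supermercado, one row per product (code, price, quantity) and the totals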
f.write(\"\\n\")\r\n f.write(\"Supermercado: \" + listafactura[4])\r\n f.write(\"\\n\")\r\n f.write(\"Productos:\")\r\n f.write(\"\\n\")\r\n f.write(\"\\n\")\r\n for i in range(5, len(listafactura) - 3, 3):\r\n f.write(listafactura[i] + \" \")\r\n f.write(listafactura[i + 1] + \" \")\r\n f.write(\"Cantidad: \" + listafactura[i + 2])\r\n f.write(\"\\n\")\r\n indice = i + 2\r\n indice += 1\r\n f.write(\"\\n\")\r\n f.write(\"Total: \" + listafactura[indice])\r\n indice += 1\r\n f.write(\"\\n\")\r\n f.write(\"descuento: \" + listafactura[indice])\r\n indice += 1\r\n f.write(\"\\n\")\r\n f.write(\"Total a pagar: \" + listafactura[indice])\r\n f.close()\r\n n = 0\r\n print(\"\\n\\nGracias por su compra, datos ingresados correctamente\")\r\n input()\r\n if int(usuario[4]) == 0: \r\n menu1(fac,a,b,c,d, ced)\r\n else:\r\n menu2(fac,a,b,c,d,ced)\r\n\r\n\r\ndef crearlista(archivo, campo, caso, city):\r\n lista = []\r\n templista = []\r\n templista2 = []\r\n tempstring = \"\"\r\n vali = 4\r\n cont = 0\r\n for line in archivo:\r\n for item in line:\r\n if item != \",\":\r\n tempstring = tempstring + str(item)\r\n else:\r\n templista = templista + [tempstring]\r\n tempstring = \"\"\r\n templista = templista + [tempstring]\r\n tempstring = \"\"\r\n if lista == []:\r\n lista = lista + [templista]\r\n vali = validacion(templista, lista, campo, caso, city)\r\n if vali == 0:\r\n lista = lista + [templista]\r\n elif vali == 2:\r\n templista[4] = str(2)\r\n for i in lista:\r\n if i[:-1] == templista[:-1]:\r\n del(lista[cont])\r\n break\r\n cont += 1\r\n lista = lista + [templista]\r\n elif vali == 3:\r\n templista[4] = str(0)\r\n lista = lista + [templista]\r\n templista = []\r\n templista2 = []\r\n return (lista)\r\n\r\n\r\ndef validacion(listatemp, lista, campo, caso, city):\r\n templista2 = []\r\n flag = 0\r\n if caso == 1:\r\n for i in lista:\r\n templista2 = i\r\n if listatemp[campo] != templista2[campo]:\r\n flag = 0\r\n else:\r\n flag = 1\r\n break\r\n elif caso == 2:\r\n for i in lista:\r\n templista2 = i\r\n if listatemp[campo] != templista2[campo]:\r\n flag = 0\r\n else:\r\n if listatemp[1] != templista2[1]:\r\n flag = 0\r\n else:\r\n flag = 1\r\n break\r\n elif caso == 3:\r\n for i in lista:\r\n templista2 = i\r\n if listatemp[campo] != templista2[campo]:\r\n flag = 0\r\n else:\r\n flag = 1\r\n break\r\n if int(listatemp[1]) != int(templista2[1]):\r\n flag = 0\r\n else:\r\n flag = 1\r\n break\r\n elif caso == 4:\r\n cont = 0\r\n for i in lista:\r\n templista2 = i\r\n if listatemp[0] in city:\r\n flag = 0\r\n else:\r\n flag = 1\r\n break\r\n if listatemp[1] == templista2[1] and listatemp[0] != templista2[0]:\r\n flag = 1\r\n break\r\n if int(listatemp[4]) <= 2:\r\n if listatemp[campo] != templista2[campo]:\r\n flag = 0\r\n else:\r\n if listatemp[4] != templista2[4]:\r\n flag = 2\r\n else:\r\n flag = 1\r\n break\r\n else:\r\n \r\n flag = 3\r\n else:\r\n print(\"Error\")\r\n return flag\r\n","sub_path":"menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":18005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"639360397","text":"from os import listdir\nfrom gensim import corpora, models\n\n\nnum_topics = 100\nnum_words = 20\ndir_docs = './parsed_docs/'\nfiles = sorted(listdir(dir_docs))\ntexts = []\n\nfor file in files:\n with open(dir_docs+file, 'r') as f:\n texts.append(f.read().split())\n\n\n# dictionary creation\ndictionary = corpora.Dictionary(texts)\nprint(dictionary)\ndictionary.filter_extremes(no_below=5, 
no_above=0.3)\ndictionary.compactify()\nprint(dictionary)\ndictionary.save('government_docs.dict')\n\n# corpus creation\ncorpus = [dictionary.doc2bow(text) for text in texts]\ncorpora.MmCorpus.serialize('government_docs.mm', corpus)\n\n# LDA_model creation\nldamodel = models.LdaModel(corpus, num_topics=num_topics, id2word=dictionary)\nldamodel.save('government_docs.model')\n\n# generated topics\ntopics = ldamodel.print_topics(num_topics=num_topics, num_words=num_words)\nprint(topics)\n\n\n\n","sub_path":"gensim_LDA.py","file_name":"gensim_LDA.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"217211712","text":"__author__ = 'Administrator'\r\n\r\n# Problem 12\r\n\r\n# this is horifically inefficient, rewrite this at some point\r\n\r\nnumbers = []\r\ntriangles = []\r\n\r\ndef get_factors(n):\r\n factors = []\r\n\r\n for i in range(1, n):\r\n if n % i == 0:\r\n factors.append(i)\r\n return factors\r\n\r\nfor n in range(1, 999999):\r\n numbers.append(n)\r\n triangle = sum(numbers)\r\n divisors = get_factors(triangle)\r\n\r\n print(n)\r\n\r\n if(len(divisors) > 500):\r\n print(triangle)\r\n break\r\n\r\nprint(\"Finished\")\r\n\r\n","sub_path":"problem12.py","file_name":"problem12.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"245020679","text":"from django.conf import settings\nfrom django.core.mail import send_mail\nfrom django.shortcuts import get_object_or_404\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom rest_framework import filters, mixins, status, viewsets\nfrom rest_framework.decorators import action, api_view, permission_classes\nfrom rest_framework.pagination import PageNumberPagination\nfrom rest_framework.permissions import AllowAny, IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework_simplejwt.tokens import AccessToken\n\nfrom reviews.models import Category, Genre, Review, Title, User\n\nfrom .filters import TitleFilter\nfrom .permissions import (CustomIsAuthenticated, IsAdmin, IsModerator, IsOwner,\n IsSafeMethod, IsSuperUser)\nfrom .serializers import (CategorySerializer, CommentSerializer,\n CustomUserSerializer, GenreSerializer,\n ReviewSerializer, SignUpSerializer, TitleSerializer,\n TokenCreateSerializer, UserMeSerializer)\n\n\nclass DestroyListCreateViewSet(mixins.DestroyModelMixin,\n mixins.ListModelMixin,\n mixins.CreateModelMixin,\n viewsets.GenericViewSet):\n pass\n\n\nclass ReviewViewSet(viewsets.ModelViewSet):\n permission_classes = [\n CustomIsAuthenticated\n & (IsOwner | IsModerator | IsAdmin | IsSuperUser)\n | IsSafeMethod,\n ]\n serializer_class = ReviewSerializer\n pagination_class = PageNumberPagination\n\n def perform_create(self, serializer):\n title = get_object_or_404(Title, pk=self.kwargs.get(\"titles_id\"))\n return serializer.save(author=self.request.user, title_id=title.id)\n\n def get_queryset(self):\n title_id = self.kwargs.get('titles_id')\n title = get_object_or_404(Title, id=title_id)\n return title.reviews_title.all()\n\n\nclass CommentViewSet(viewsets.ModelViewSet):\n permission_classes = [\n CustomIsAuthenticated\n & (IsOwner | IsModerator | IsAdmin | IsSuperUser)\n | IsSafeMethod,\n ]\n serializer_class = CommentSerializer\n pagination_class = PageNumberPagination\n\n def perform_create(self, serializer):\n review = get_object_or_404(Review, id=self.kwargs.get('review_id'))\n return 
serializer.save(author=self.request.user, review=review)\n\n def get_queryset(self):\n title = get_object_or_404(Title, id=self.kwargs.get('title_id'))\n review = title.reviews_title.get(id=self.kwargs.get('review_id'))\n return review.comments.all()\n\n\nclass TitleViewSet(viewsets.ModelViewSet):\n permission_classes = [\n CustomIsAuthenticated & (IsAdmin | IsSuperUser) | IsSafeMethod\n ]\n serializer_class = TitleSerializer\n pagination_class = PageNumberPagination\n queryset = Title.objects.all()\n filter_backends = [DjangoFilterBackend]\n filterset_class = TitleFilter\n\n\nclass CategoryViewSet(DestroyListCreateViewSet):\n queryset = Category.objects.all()\n serializer_class = CategorySerializer\n filter_backends = [filters.SearchFilter]\n permission_classes = [\n CustomIsAuthenticated & (IsAdmin | IsSuperUser) | IsSafeMethod\n ]\n search_fields = ('name', 'slug')\n lookup_field = 'slug'\n\n\nclass GenreViewSet(DestroyListCreateViewSet):\n queryset = Genre.objects.all()\n serializer_class = GenreSerializer\n filter_backends = [filters.SearchFilter]\n permission_classes = [\n CustomIsAuthenticated & (IsAdmin | IsSuperUser) | IsSafeMethod\n ]\n search_fields = ('name', 'slug')\n lookup_field = 'slug'\n\n\n@api_view(['POST'])\n@permission_classes([AllowAny])\ndef create_new_user(request):\n serializer = SignUpSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n username = request.data['username']\n email = request.data['email']\n confirmation_code = User.objects.make_random_password()\n send_mail(\n 'Confirmation code from YamDb',\n f'Dear {username}, you confirmation code: {confirmation_code}',\n settings.EMAIL_HOST_USER,\n [email],\n fail_silently=False,\n )\n serializer.save(password=confirmation_code)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n\n@api_view(['POST'])\n@permission_classes([AllowAny])\ndef create_access_token(request):\n serializer = TokenCreateSerializer(data=request.data)\n username = request.data.get('username')\n serializer.is_valid(raise_exception=True)\n current_user = get_object_or_404(User, username=username)\n token = AccessToken.for_user(current_user)\n return Response({'token': str(token)}, status=status.HTTP_200_OK)\n\n\nclass CustomUserViewSet(viewsets.ModelViewSet):\n queryset = User.objects.all()\n serializer_class = CustomUserSerializer\n lookup_field = 'username'\n search_fields = ('username',)\n permission_classes = [CustomIsAuthenticated & (IsAdmin | IsSuperUser)]\n\n @action(\n detail=False,\n methods=['get', 'patch'],\n permission_classes=(IsAuthenticated,),\n serializer_class=UserMeSerializer\n )\n def me(self, request):\n user_me = User.objects.get(username=self.request.user.username)\n if request.method == 'GET':\n serializer = self.get_serializer(user_me)\n return Response(serializer.data, status=status.HTTP_200_OK)\n serializer = self.get_serializer(\n user_me,\n data=request.data,\n partial=True\n )\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(serializer.data, status=status.HTTP_200_OK)\n","sub_path":"api/v1/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"367501735","text":"from email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom datetime import datetime\nimport smtplib\nimport os\n\nmsg = MIMEMultipart()\nmsg[\"From\"] = \"pruebas.tecuala@gruposcit.com\"\nmsg[\"To\"] = 
\"c.eduardo.mdr@gmail.com\"\nmsg[\"Subject\"] = \"Prueba email Carlos\"\n\npath_to_file = 'assets' + os.sep + 'firma.html'\n\narchivo = open(path_to_file, 'r')\nfirma_texto = archivo.read()\n\nfecha = datetime(2018,3,24)\nbody = \"\"\"\n

Hola {usuario} ,

\n
\n

Te recordamos que en la {fecha}, tienes un vencimiento con nosotros por {saldo}

\n
\n{firma}\n\"\"\".format(usuario=\"Juan\", fecha=fecha.strftime(\"%d de %B del %Y\"), saldo=\"$4,321.50\", firma=firma_texto)\n\nbodyHtml = MIMEText(body, 'html', 'utf-8')\nmsg.attach(bodyHtml)\n\nserver = smtplib.SMTP('mail.gruposcit.com', 26)\nserver.starttls()\nserver.login(msg[\"From\"], 'pruebas123@')\nserver.sendmail(msg[\"From\"], msg[\"To\"], msg.as_string())\nserver.quit()\n\nprint(\"El correo fue enviado exitosamente\")\n\n# server = smtplib.SMTP('smtp.gmail.com', 587)","sub_path":"email_test.py","file_name":"email_test.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"509342993","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 15 10:36:28 2021\n\n@author: rio\n\"\"\"\n\nimport os\n\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nos.getcwd()\nos.chdir(\"/home/rio/Dokumente/Uni/Master/Module/Fieldecology/leaf_herbivory/images\")\n\n#pathtest = \"raw/IMG_4735.jpg\"\n#img = cv2.imread(pathtest)\nimg = cv2.imread(\"raw/IMG_4769.jpg\")\nimg = img[500:2200,500:1800]\n\nplt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n\n\n# remove non green areas as HLS colortype\ngreen_min = np.array([5, 50, 50],np.uint8)\ngreen_max = np.array([15, 255, 255],np.uint8)\n\nhls = cv2.cvtColor(img,cv2.COLOR_BGR2HLS)\nmask = cv2.inRange(hls, (30,20, 60), (75, 180, 255))\ncrop = cv2.bitwise_and(img, img, mask=mask)\n\n################ not neccessary #######\n# grey image\n\nimg_grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\nplt.imshow(img_grey, \"Greys_r\")\n\ncrop_grey = cv2.cvtColor(crop, cv2.COLOR_BGR2GRAY)\n\n# remove light colors to extract background and holes\nblack = np.array([0])\nlightest_grey = np.array([110])\nmaskgrey = cv2.inRange(img_grey,black,lightest_grey)\ncrop_bw = cv2.bitwise_and(img_grey, img_grey, mask=maskgrey)\n\n\n#histogramm equilisation\n## too strong\nimg_adj = cv2.equalizeHist(img_grey)\n\nclahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))\ncl1 = clahe.apply( cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))\n\nplt.imshow(cl1, \"Greys_r\")\n\nseg = cv2.bitwise_and(cl1, cl1, mask=maskBGR)\n\n\n##############remove later\n\n\n## Morphology for closing small holes created by the backgroundseparation\n#lightly\nse = np.ones((7,7), dtype='uint8')\nimg_close_light = cv2.morphologyEx(seg, cv2.MORPH_CLOSE, se)\n\n# strongly \nse = np.ones((20,20), dtype='uint8')\nimg_close = cv2.morphologyEx(binary_crop, cv2.MORPH_CLOSE, se)\n\n# closing completely herbivory for external leave contour\nse = np.ones((100,100), dtype='uint8')\nleaf_binary = cv2.morphologyEx(binary_crop, cv2.MORPH_CLOSE, se)\n\n# number of pixels with holes for area estimation\n\nleaf_pxl = np.count_nonzero(img_close_light)\n\n# counting area without holes:\n\nleaf_whole = np.count_nonzero(leaf_binary)\n\n### percentage\n\nestimate = np.round(leaf_pxl / leaf_whole * 100,2)\n\n\n###### because of chunk on the side we need the convex hull \n\n#### \n#threshold = 10\n\nplt.imshow(canny_output, \"Greys\")\n\ncanny_output = cv2.Canny(leaf_binary, 0,255)\n# find contours\ncontours, hierarchy = cv2.findContours(canny_output, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n\narea = []\nfor x in contours:\n area.append(cv2.contourArea(x))\n\nhull = cv2.convexHull(contours[np.argmax(area)])\n\nhull_list = []\nfor i in range(len(contours)):\n hull = cv2.convexHull(contours[i])\n hull_list.append(hull)\n\n\narea = []\nfor x in hull_list:\n 
area.append(cv2.contourArea(x))\n","sub_path":"gui/try.py","file_name":"try.py","file_ext":"py","file_size_in_byte":2706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"284927766","text":"#! /usr/bin/env morseexec\n\nfrom morse.builder import *\nfrom map_setup import setup_warehouse_map\nfrom package_setup import add_packages\nimport json\n\n# specify configuration files\n#warehouse_config_file = os.path.join(os.path.dirname(__file__), '../../configs/smart_factory_config.json')\nwarehouse_config_file = os.path.join(os.path.dirname(__file__), '../../configs/smart_factory_config_simple.json')\nrobot_config_file = os.path.join(os.path.dirname(__file__), '../../configs/robot_config.json')\npackage_config_file = os.path.join(os.path.dirname(__file__), '../../configs/package_config.json')\n\n# read configurations from JSON files\nwith open(warehouse_config_file, 'r') as f:\n map_config = json.load(f)\nwith open(robot_config_file, 'r') as f:\n robot_config = json.load(f)\nwith open(package_config_file, 'r') as f:\n package_configs = json.load(f)\n\n# setup map\nsetup_warehouse_map(map_config, robot_config, simplfy_flag = False)\nadd_packages(map_config, package_configs)\n\n# set 'fastmode' to True to switch to wireframe mode\nenv = Environment('empty_environment.blend', fastmode = True)\n\nmap_size_x = map_config['map']['width']\nmap_size_y = map_config['map']['height']\n\nenv.set_camera_location([-2., 8.0, 4.5])\nenv.set_camera_rotation([0.95, 0, -1.2])\n \n","sub_path":"smart_factory_ss19_g3/factory_morse/auto_smart_factory/auto_factory.py","file_name":"auto_factory.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"214251914","text":"import requests\nimport pprint\n\n# Url from jonblum.net/python\nURL = \"http://api.openweathermap.org/data/2.5/forecast/daily?q=Chicago&units=metric&cnt=10&appid=f5f76fc80be1dfc220492acb706cb7e3\"\n#print(UR)L\nresponse = requests.get(URL)\ndata = response.json()\n#prity printing the output\n#pprint.pprint(data)\nday_list = data['list']\nfor day in day_list:\n daytime_temp = day['temp']['day']\n print('The Temperature today is', daytime_temp)\n","sub_path":"WeatherApp.py","file_name":"WeatherApp.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"499930176","text":"# Ejercicio 9-3\nclass usuario:\n def __init__(self, first_name, last_name, semestre, escuela):\n self.first_name = first_name\n self.last_name = last_name\n self.semestre = semestre\n self.escuela = escuela\n\n def __str__(self):\n return 'Nombre: {} \\nApellido: {} \\nSemestre: {} \\nEscuela: {}'.format(self.first_name,self.last_name,self.semestre,self.escuela)\n\nusuario_1 = usuario('Luis','Rodas','Sexto','TEC Monterry')\nusuario_2 = usuario('Alfonso','Hinojosa','Primero','Cbtis 300')\nusuario_3 = usuario('Alexander','Jimenez','Decimo','Escuela de la vida')\n\nprint(usuario_1)\nprint('\\n')\nprint(usuario_2)\nprint('\\n')\nprint(usuario_3)\n","sub_path":"Ago-Dic-2018/ArmandoGarcia/practica2/usuarios.py","file_name":"usuarios.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"26523080","text":"# -*- encoding: utf-8 -*-\n\nfrom mangager import HistoryManager\nimport json\nimport tornado.web\nfrom core.utils import decrypt_token\nfrom setting import 
JWT_SECRET, JWT_ALGORITHM\nimport const\nfrom utils.token import Token\nfrom base.handler import BaseHandler\n\nclass DelList(tornado.web.RequestHandler):\n\n def delete(self, list_id):\n token = self.request.headers.get('token').decode('utf-8')\n errormsg= {}\n\n res = Token.is_existed(token)\n if res:\n errormsg['errorCode'] = const.INVALIDTOKEN\n errormsg['errorMsg'] = const.MSG[const.INVALIDTOKEN]\n self.write(errormsg)\n return\n\n user_id = str(res['token'])\n\n HistoryManager.delete_list(user_id, list_id)\n errormsg['errorCode'] = const.SUCCESS\n errormsg['errorMsg'] = const.MSG[const.SUCCESS]\n self.write(errormsg)\n\n\nclass AddList(BaseHandler):\n\n def post(self):\n token = self.request.headers.get('token').decode('utf-8')\n errormsg= {}\n\n res = Token.is_existed(token)\n if res:\n errormsg['errorCode'] = const.INVALIDTOKEN\n errormsg['errorMsg'] = const.MSG[const.INVALIDTOKEN]\n self.write(errormsg)\n return\n\n userid = Token.decrypt_token(token, JWT_SECRET, JWT_ALGORITHM).get('id')\n list_name = self.parse_body('name')\n goodlist = self.parse_body('goodlist')\n HistoryManager.add_list(list_name, goodlist, userid)\n errormsg['errorCode'] = const.SUCCESS\n errormsg['errorMsg'] = const.MSG[const.SUCCESS]\n self.write(errormsg)\n\n\n\n","sub_path":"HistoryList/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"230089773","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nimport traceback\r\nimport re\r\n\r\n\r\ndef getHTMLText(url):\r\n try:\r\n r = requests.get(url)\r\n r.raise_for_status()\r\n r.encoding = r.apparent_encoding\r\n return r.text\r\n except:\r\n return \"\"\r\n\r\n\r\ndef getStockList(lst, stockURL):\r\n html = getHTMLText(stockURL)\r\n soup = BeautifulSoup(html, 'html.parser')\r\n a = soup.find_all('a')\r\n for i in a:\r\n try:\r\n href = i.attrs['href']\r\n lst.append((re.findall(r\"[s][hz]\\d{6}\", href)[0])[2:])\r\n except:\r\n continue\r\n return \"\"\r\n\r\n\r\ndef getStockInfo(lst, stockURL, fpath):\r\n for stock in lst:\r\n url = stockURL + stock\r\n html = getHTMLText(url)\r\n try:\r\n if html == \"\":\r\n continue\r\n infoDict = {}\r\n soup = BeautifulSoup(html, 'html.parser')\r\n stockInfo = soup.find('div', attrs={'class': 'stock-quote-wrap'})\r\n\r\n name = stockInfo.find_all('p', attrs={'class': \"title\"})[0]\r\n infoDict.update({'股票名称': name.text.split()[1]})\r\n\r\n for ch in infoDict['股票名称']:\r\n if u'\\u4e00' <= ch <= u'\\u9fff':\r\n contentList = stockInfo.find_all('td')\r\n for i in range(len(contentList)):\r\n key = contentList[i].text.split(':')[0]\r\n value = contentList[i].text.split(':')[1]\r\n infoDict[key] = value\r\n\r\n with open(fpath, 'a', encoding='GB2312') as f:\r\n f.write(str(infoDict) + '\\n')\r\n f.close()\r\n break\r\n except:\r\n traceback.print_exc()\r\n continue\r\n\r\n return \"\"\r\n\r\n\r\ndef main():\r\n stock_list_url = \"http://quote.eastmoney.com/stock_list.html\"\r\n stock_info_url = \"https://www.laohu8.com/hq/s/\"\r\n output_file = 'e:/stockInfo.txt'\r\n slist = []\r\n getStockList(slist, stock_list_url)\r\n getStockInfo(slist[:100], stock_info_url, output_file)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"Mooc/week3/crawlBaiduStocks.py","file_name":"crawlBaiduStocks.py","file_ext":"py","file_size_in_byte":2121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"7841173","text":"import matplotlib.pyplot 
as plt\nimport numpy as np\n\n\n# definitions\n\n# worm-like chain\ndef WLC(f, p, L, S, x0):\n return (L * (1 - 0.5 * (np.sqrt(kBT / (f * p))) + f / S)) / 1000 + x0\n\n\n# L_app\ndef NewP(L, p, alpha, i):\n C = 8 * (1 - np.cos(alpha / 4))\n ro = i / L\n return p / ((1 + p * i * C * ro) ** 2)\n\n\n# constants\n\nkBT = 4.114 # (pN nm) - Boltzmann factor\nLc = 4092 # contour length (bp)\nL = Lc * 0.34 # contour length (nm)\np = 50 # persistence length (nm)\nS = 1000 # stretch modulus (pN)\nx0 = 0 # offset (nm)\nalpha = 110 # opening angle (degrees)\ni = 1 # number of dimers\n\nforce = []\nextension = []\n\nfor f in range(1, 175):\n f /= 10\n force.append(f)\n extension.append(WLC(f, p, L, S, x0))\n\nfiles = []\nfiles.append('180109_data_031_72.fit')\nfiles.append('180109_data_046_72.fit')\nfiles.append('180109_data_068_72.fit')\n\n\nfor file_all in files:\n\n # read file\n f = open(file_all, 'r')\n data_lines = f.readlines()[1:]\n f.close()\n\n data_force = []\n data_extension = []\n\n for x in data_lines:\n data_force.append(float(x.split()[0]))\n data_extension.append(float(x.split()[2]))\n\n plt.scatter(data_extension, data_force, label=str(file_all), s=10)\n\nplt.plot(extension, force, label=\"WLC\", zorder=100, color=\"black\")\nplt.xlim(0, 1.5)\nplt.ylim(-1, 20)\nplt.xlabel('Extension ($\\mu$m)')\nplt.ylabel('Force (pN)')\nplt.legend(frameon=False)\nplt.tick_params(direction='in', axis=\"both\", bottom=\"on\", top=\"on\", left=\"on\", right=\"on\")\nplt.show()","sub_path":"IHF/IHF_fitfiles.py","file_name":"IHF_fitfiles.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"627111027","text":"from flask import Flask, render_template\r\nfrom sense_hat import SenseHat as shat\r\n\r\nsh = shat()\r\n\r\nfile = \"log.csv\"\r\n\r\ndef get_file_data():\r\n\tlogData = 0\r\n\tlogDataList = []\r\n\twith open(file, \"r\") as f:\r\n\t\tlogData = f.read()\r\n\ttemp = logData.split(\"\\n\")\r\n\t\r\n\tfor iter in temp:\r\n\t\tprint(iter)\r\n\t\tif iter != 'Temperature,Humidity,Pressure':\r\n\t\t\tif iter != '':\r\n\t\t\t\tlogDataList.append(iter.split(\",\"))\r\n\treturn logDataList\r\n\t \r\n\r\n\r\ndef get_sensor_data():\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#Funtion der tager data fra sensorerne\r\n\tt = sh.get_temperature()\r\n\th = sh.get_humidity()\r\n\tp = sh.get_pressure()\r\n\treturn [round(t, 1), round(h), round(p, 2)]\r\n\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/')\r\n\r\n#@app.route('/')\r\n\r\ndef index():\r\n\tfresh = get_sensor_data()\r\n\tdata = get_file_data()\r\n\treturn render_template('index.html', vars=data, fresh=fresh)\r\n\r\nif __name__ == '__main__':\r\n\tapp.run(debug=True, host='127.0.0.1')","sub_path":"dag5/flask_opg.py","file_name":"flask_opg.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"306827768","text":"# Copyright (c) 2015 Mirantis Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# 
limitations under the License.\n\nimport mock\nimport testtools\n\nfrom shaker.engine.executors import base\nfrom shaker.engine import quorum\n\n\nclass TestOperations(testtools.TestCase):\n\n def test_execute_operation_process_reply(self):\n executor = mock.MagicMock()\n executor.process_reply = mock.Mock(return_value={'samples': []})\n\n agent_id = 'the-agent'\n ex = quorum.ExecuteOperation({agent_id: executor})\n\n message = {\n 'stdout': 'foo',\n 'stderr': '',\n }\n reply = ex.process_reply(agent_id, message)\n\n expected = {\n 'status': 'ok',\n 'samples': [],\n }\n executor.process_reply.assert_called_once_with(message)\n self.assertEqual(expected, reply)\n\n def test_execute_operation_process_reply_with_error(self):\n executor = mock.MagicMock()\n executor.process_reply = mock.Mock(\n side_effect=base.ExecutorException({'stderr': 'sad'}, 'Error!'))\n\n agent_id = 'the-agent'\n ex = quorum.ExecuteOperation({agent_id: executor})\n\n message = {\n 'stdout': 'foo',\n 'stderr': '',\n }\n reply = ex.process_reply(agent_id, message)\n\n expected = {\n 'status': 'error',\n 'stderr': 'sad',\n 'info': 'Error!'\n }\n executor.process_reply.assert_called_once_with(message)\n self.assertDictContainsSubset(expected, reply)\n\n def test_execute_operation_process_reply_with_unhandled_exception(self):\n executor = mock.MagicMock()\n executor.process_reply = mock.Mock(\n side_effect=Exception('Boom!'))\n\n agent_id = 'the-agent'\n ex = quorum.ExecuteOperation({agent_id: executor})\n\n message = {}\n reply = ex.process_reply(agent_id, message)\n\n expected = {\n 'status': 'error',\n 'info': 'Boom!'\n }\n executor.process_reply.assert_called_once_with(message)\n self.assertDictContainsSubset(expected, reply)\n","sub_path":"shaker/tests/test_operations.py","file_name":"test_operations.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"368185318","text":"#coding=utf-8\r\nimport os\r\nimport salt.client\r\nfrom service_base_class import ServiceBase\r\n\r\n\r\nclass GetPeerStatus(ServiceBase):\r\n def serviceOperation(self):\r\n servers = self.request['data']\r\n\r\n local = salt.client.LocalClient()\r\n serviceStatus = local.cmd('*', 'service.status', ['glusterd'])\r\n\r\n allCheck = False\r\n for k in serviceStatus.keys():\r\n if serviceStatus[k] == True:\r\n hostName = k\r\n allCheck = True\r\n break\r\n else:\r\n continue\r\n if allCheck == False:\r\n return {}, 1\r\n\r\n result = local.cmd(hostName, 'glusterfs.peer_status')\r\n peerStatus = result[hostName]\r\n for server in servers:\r\n if server['host_name'] == hostName:\r\n server['peer_state'] = \"Connected\"\r\n continue\r\n for uuid in peerStatus:\r\n peer = peerStatus[uuid]\r\n if server['host_name'] in peer['hostnames'] or server['ip_data'] in peer['hostnames']:\r\n if peer['connected'] == \"1\":\r\n server['peer_state'] = \"Connected\"\r\n elif peer['connected'] == \"0\":\r\n server['peer_state'] = \"Disconnected\"\r\n else:\r\n continue\r\n\r\n return servers, 0\r\n","sub_path":"scripts/get_peer_status.py","file_name":"get_peer_status.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"535689552","text":"from collections import Counter\r\nfrom sys import stderr\r\nt = int(input())\r\nfor cn in range(t):\r\n n = int(input())\r\n al = []\r\n for c in range(n * 2 - 1):\r\n vs = list(map(int, input().split()))\r\n al.extend(vs)\r\n c = Counter(al)\r\n 
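    # the lost list consists of exactly the values whose total count across the 2*n-1 input lists is odd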
od = [v for v,k in c.items() if k & 1]\r\n    if len(od) != n:\r\n        print(\"Size mismatch!\", file=stderr)\r\n    print(\"Case #%d: %s\" % (cn + 1, \" \".join(map(str, sorted(od)))))\r\n","sub_path":"solutions_5630113748090880_0/Python/lbj/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"415465378","text":"# program to work out copy count between a previous visit and current\n# 15 feb 2020 - phil welsby\n\ndef wiper():\n    print('\\n' * 100)\nwiper()\n\nold_bk = int(input('Enter previous black counter: '))\nold_col = int(input('Enter previous colour counter: '))\nnew_bk = int(input('Enter current black meter: '))\nnew_col = int(input('Enter current colour meter: '))\n\ntotal_bk = new_bk - old_bk\ntotal_col = new_col - old_col\n\ntotal_count = total_bk + total_col\n\nprint('Total Black =', total_bk)\nprint('Total Colour =', total_col)\nprint('Total Count =', total_count)\n","sub_path":"misc/copy_count_calc.py","file_name":"copy_count_calc.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"230251715","text":"from __future__ import division\nimport numpy as np\n\nN = 100\nP = []\nfor i in range(5):\n    P.append(np.array([np.array([-1, 1])[np.random.randint(2)] for i in range(N)]))\n\ndef sign(x):\n    if x == 0:\n        return 1\n    else:\n        return np.sign(x)\n\ndef update(V):\n    for i in range(N):\n        updated_val = 0\n        for n in range(N):\n            updated_val += M[i, n]*V[n]\n        V[i] = sign(updated_val)  # write back into V; rebinding a loop variable would not update the state\n\ndef overlap(V, m):\n    return (np.matmul(V, P[m].T))/N\n\n\nM = np.zeros((N, N))  # start from zeros; np.empty leaves uninitialized garbage\n\n# create M (Hebbian rule: sum of outer products of the stored patterns)\nfor p in P:\n    M += np.outer(p, p)  # np.matmul of two 1-D vectors is a scalar, not the N x N outer product\n\nfor i in range(N):\n    M[i, i] = 0\n\nV = np.array([np.array([-1, 1])[np.random.randint(2)] for i in range(N)])\n\n\n\nfor i in range(100):\n    print(overlap(V, 0))\n    update(V)\n    print(V)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"tn/ex/ex4.py","file_name":"ex4.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"396590218","text":"# need a data_read, data_sel, data_reduce\n\nimport numpy as np\nimport argparse\nimport intake\nimport io\nimport signal\nimport sys\nimport matplotlib.pyplot as plt\nimport pkg_resources as pkgr\nimport tarfile as tf\nimport xarray as xr\nimport warnings\n\nfrom cmip_basins import generate_basin_codes\n\ntry:\n    from om4labs.helpers import try_variable_from_list\nexcept ImportError:\n    # DORA mode, works without install.\n    # reads from current directory\n    from helpers import try_variable_from_list\n\nfrom static_downsampler.static import sum_on_supergrid\nfrom static_downsampler.static import subsample_supergrid\n\npossible_names = {}\npossible_names[\"lon\"] = [\"lon\", \"LON\", \"longitude\", \"LONGITUDE\"]\npossible_names[\"lat\"] = [\"lat\", \"LAT\", \"latitude\", \"LATITUDE\"]\npossible_names[\"time\"] = [\"time\", \"TIME\", \"latitude\"]\npossible_names[\"depth\"] = [\"z_l\", \"depth\", \"DEPTH\"]\npossible_names[\"interfaces\"] = [\"z_i\"]\n\n\nclass DefaultDictParser(argparse.ArgumentParser):\n    \"\"\" argparse extension that bypasses error and returns a dict of defaults \"\"\"\n\n    def error(self, message):\n        actions = self.__dict__[\"_actions\"]\n        defaults = {}\n        for act in actions[1::]:\n            defaults[act.__dict__[\"dest\"]] = act.__dict__[\"default\"]\n        return defaults\n\n\ndef date_range(ds):\n    \"\"\"Returns a tuple of start year and end 
year from xarray dataset\n\n Parameters\n ----------\n ds : xarray.Dataset\n Input dataset\n\n Returns\n -------\n tuple\n (start year, end year) for time dimension of the dataset\n \"\"\"\n\n if \"time_bnds\" in list(ds.variables):\n t0 = tuple(ds[\"time_bnds\"].values[0][0].timetuple())[0]\n\n # if end bound is Jan-1, fall back to previous year\n t1 = tuple(ds[\"time_bnds\"].values[-1][-1].timetuple())\n t1 = (t1[0] - 1) if (t1[1:3] == (1, 1)) else t1[0]\n\n else:\n t0 = int(ds[\"time\"].isel({\"time\": 0}).dt.strftime(\"%Y\"))\n t1 = int(ds[\"time\"].isel({\"time\": -1}).dt.strftime(\"%Y\"))\n\n return (t0, t1)\n\n\ndef extract_from_tar(tar, member):\n \"\"\"Loads Xarray DataSet in memory from a file contained inside a tar file\n\n Parameters\n ----------\n tar : tarfile.TarFile\n TarFile object generated through tarfile.open('file')\n member : str\n Name of dataset to extract to memory\n\n Returns\n -------\n xarray.DataSet\n In-memory Xarray dataset\n \"\"\"\n\n if member not in tar.getnames():\n member = \"./\" + member\n f = tar.extractfile(member)\n data = f.read()\n\n # the line below is retained for NetCDF4 library reference\n # dataset = netCDF4.Dataset(\"in-mem-file\", mode=\"r\", memory=data)\n\n dataset = xr.open_dataset(data)\n\n return dataset\n\n\ndef image_handler(figs, dictArgs, filename=\"./figure\"):\n \"\"\"Generic routine for image handling\"\"\"\n\n imgbufs = []\n numfigs = len(figs)\n\n if not isinstance(filename, list):\n filename = [filename]\n\n assert (\n len(filename) == numfigs\n ), \"Number of figure handles and file names do not match.\"\n\n if dictArgs[\"interactive\"] is True:\n plt.ion()\n for n, fig in enumerate(figs):\n plt.show(fig)\n\n def _signal_handler(sig, frame):\n print(\"Complete!\")\n sys.exit(0)\n\n signal.signal(signal.SIGINT, _signal_handler)\n print(\"Press ctrl+c to exit...\")\n signal.pause()\n else:\n for n, fig in enumerate(figs):\n if dictArgs[\"format\"] == \"stream\":\n imgbuf = io.BytesIO()\n fig.savefig(imgbuf, format=\"png\", bbox_inches=\"tight\")\n imgbufs.append(imgbuf)\n else:\n fig.savefig(\n f\"{filename[n]}.png\",\n format=dictArgs[\"format\"],\n dpi=150,\n bbox_inches=\"tight\",\n )\n\n return imgbufs\n\n\ndef infer_and_assign_coord(ds, da, coordname):\n \"\"\" infer what the coord name is and assign it to dataarray \"\"\"\n assigned_coordname = try_variable_from_list(\n list(ds.variables), possible_names[coordname]\n )\n if (assigned_coordname is not None) and (assigned_coordname in da.dims):\n da = da.rename({assigned_coordname: f\"assigned_{coordname}\"})\n return da\n\n\ndef read_data(ds, possible_variable_names):\n \"\"\" read data from one file \"\"\"\n\n # find the appropriate variable names\n varname = try_variable_from_list(list(ds.variables), possible_variable_names)\n if varname is None:\n raise ValueError(f\"no suitable variable found in dataset\")\n\n da = ds[varname]\n da = infer_and_assign_coord(ds, da, \"lon\")\n da = infer_and_assign_coord(ds, da, \"lat\")\n da = infer_and_assign_coord(ds, da, \"time\")\n da = infer_and_assign_coord(ds, da, \"depth\")\n da = infer_and_assign_coord(ds, da, \"interfaces\")\n\n return da\n\n\ndef standard_grid_cell_area(lat, lon, rE=6371.0e3):\n \"\"\" computes the cell area for a standard spherical grid \"\"\"\n\n warnings.warn(\n \"standard_grid_cell_area is deprecated, use compute_area_regular_grid\",\n DeprecationWarning,\n )\n\n dLat = lat[1] - lat[0]\n dLon = lon[1] - lon[0]\n area = np.empty((len(lat), len(lon)))\n for j in range(0, len(lat)):\n for i in range(0, 
len(lon)):\n lon1 = lon[i] + dLon / 2\n lon0 = lon[i] - dLon / 2\n lat1 = lat[j] + dLat / 2\n lat0 = lat[j] - dLat / 2\n area[j, i] = (\n (np.pi / 180.0)\n * rE\n * rE\n * np.abs(np.sin(np.radians(lat0)) - np.sin(np.radians(lat1)))\n * np.abs(lon0 - lon1)\n )\n return area\n\n\ndef subset_data(da, coordname, subset):\n \"\"\" subset (float or slice) dataarray along coord \"\"\"\n if coordname in da.coords:\n da = da.sel({coordname: subset})\n return da\n\n\ndef simple_average(da, coordname):\n \"\"\" average \"\"\"\n if coordname in da.coords:\n da = da.mean(dim=coordname)\n return da\n\n\ndef copy_coordinates(da1, da2, coords):\n \"\"\" copy coordinates of da1 into da2 \"\"\"\n for coord in coords:\n da2[coord] = da1[coord]\n\n return da2\n\n\ndef compute_area_regular_grid(ds, Rearth=6378e3):\n \"\"\" compute the cells area on a regular grid \"\"\"\n\n rfac = 2 * np.pi * Rearth / 360\n\n up = {\"bnds\": 1}\n down = {\"bnds\": 0}\n if \"time\" in ds[\"lon_bnds\"].dims:\n up.update({\"time\": 0})\n down.update({\"time\": 0})\n\n dx1d = rfac * (ds[\"lon_bnds\"].isel(up) - ds[\"lon_bnds\"].isel(down))\n dy1d = rfac * (ds[\"lat_bnds\"].isel(up) - ds[\"lat_bnds\"].isel(down))\n\n dx2d, dy2d = np.meshgrid(dx1d, dy1d)\n _, lat2d = np.meshgrid(ds[\"lon\"].values, ds[\"lat\"].values)\n\n dx = dx2d * np.cos(2 * np.pi * lat2d / 360)\n dy = dy2d\n area = dx * dy\n return area\n\n\ndef grid_from_supergrid(ds, point_type=\"t\"):\n \"\"\"Subsample super grid to obtain geolon, geolat, and cell area\n\n Parameters\n ----------\n ds : xarray.Dataset\n Input dataset containing variables from the supergrid\n point_type : str, optional\n Requested grid type of t|q|u|v, by default \"t\"\n\n Returns\n -------\n geolat : xarray.DataArray\n 2-dimensional Earth-centric latitude coordinates\n geolon : xarray.DataArray\n 2-dimensional Earth-centric longitude coordinates\n area : xarray.DataArray\n Array of cell areas with dimension (geolat,geolon)\n \"\"\"\n geolat = subsample_supergrid(ds, \"y\", point_type)\n geolon = subsample_supergrid(ds, \"x\", point_type)\n area = sum_on_supergrid(ds, \"area\", point_type)\n return geolat, geolon, area\n\n\ndef horizontal_grid(dictArgs=None, point_type=\"t\", output_type=\"xarray\"):\n \"\"\"Returns horizontal grid parameters based on the values of the CLI\n arguments and the presence of intake catalogs.\n\n The requested grid can either tracer points (t), corner points (q),\n zonal velocity points (u), or meridional velocity points (v).\n\n The corresponding basin mask for the grid is also returned.\n\n The nominal x and y 1-D coordinates are provided. 
Since these are\n    non-physical, however, they should only be used for plotting purposes.\n\n    If `dictArgs` is omitted, the function returns a standard 1x1\n    spherical grid.\n\n    Parameters\n    ----------\n    dictArgs : dict, optional\n        dictionary of arguments obtained from the CLI parser, by default None\n    point_type : str, optional\n        Requested grid type of t|q|u|v, by default \"t\"\n    output_type : str, optional\n        Specify output format of either \"xarray\" or \"numpy\", by default \"xarray\"\n\n    Returns\n    -------\n    xarray.Dataset or tuple\n        Arrays of geolat, geolon, nominal_x, nominal_y, area, and basin\n    \"\"\"\n    point_type = point_type.upper()\n\n    # if verbose is present in dictArgs that was generated by the parser,\n    # that value takes precedence over the kwarg version\n    # (guard against dictArgs=None so the default-grid branch below is reachable)\n    verbose = dictArgs[\"verbose\"] if dictArgs is not None and \"verbose\" in dictArgs else False\n    basin_file = dictArgs[\"basin\"] if dictArgs is not None and \"basin\" in dictArgs else None\n\n    if dictArgs is None:\n        x = np.arange(0.5, 360.5, 1.0)\n        y = np.arange(-89.5, 90.5, 1.0)\n        area = standard_grid_cell_area(y, x)\n        geolon, geolat = np.meshgrid(x, y)\n        geolon = xr.DataArray(geolon, dims=(\"y\", \"x\"), coords={\"y\": y, \"x\": x})\n        geolat = xr.DataArray(geolat, dims=(\"y\", \"x\"), coords={\"y\": y, \"x\": x})\n        area = xr.DataArray(area, dims=(\"y\", \"x\"), coords={\"y\": y, \"x\": x})\n        nominal_x = geolon[geolon.dims[-1]]\n        nominal_y = geolat[geolat.dims[-2]]\n\n    elif dictArgs[\"hgrid\"] is not None:\n        if verbose:\n            print(\"Using optional hgrid file for horizontal grid.\")\n        ds = xr.open_dataset(dictArgs[\"hgrid\"])\n        geolat, geolon, area = grid_from_supergrid(ds, point_type)\n\n    elif dictArgs[\"static\"] is not None:\n        if verbose:\n            print(\"Using optional static file for horizontal grid.\")\n\n        ds = xr.open_dataset(dictArgs[\"static\"])\n\n        if point_type == \"T\":\n            geolat = ds[\"geolat\"]\n            geolon = ds[\"geolon\"]\n            area = ds[\"areacello\"]\n\n        elif point_type == \"U\":\n            geolat = ds[\"geolat_u\"]\n            geolon = ds[\"geolon_u\"]\n            area = ds[\"areacello_cu\"]\n\n        elif point_type == \"V\":\n            geolat = ds[\"geolat_v\"]\n            geolon = ds[\"geolon_v\"]\n            area = ds[\"areacello_cv\"]\n\n        else:\n            raise ValueError(\"Unknown point type. Must be T, U, or V\")\n\n    elif dictArgs[\"gridspec\"] is not None:\n        if verbose:\n            print(\"Using optional gridspec tar file for horizontal grid.\")\n        tar = tf.open(dictArgs[\"gridspec\"])\n        ds = extract_from_tar(tar, \"ocean_hgrid.nc\")\n        geolat, geolon, area = grid_from_supergrid(ds, point_type)\n\n    elif dictArgs[\"platform\"] is not None and dictArgs[\"config\"] is not None:\n        if verbose:\n            print(\n                f\"Using {dictArgs['platform']} {dictArgs['config']} intake catalog for horizontal grid.\"\n            )\n        cat = open_intake_catalog(dictArgs[\"platform\"], dictArgs[\"config\"])\n        ds = cat[\"ocean_hgrid\"].to_dask()\n        geolat, geolon, area = grid_from_supergrid(ds, point_type)\n\n    result = xr.Dataset()\n    result[\"geolat\"] = geolat\n    result[\"geolon\"] = geolon\n    result[\"area\"] = area\n\n    # nominal coordinates\n    result[\"nominal_x\"] = result.geolon.max(axis=-2)\n    result[\"nominal_y\"] = result.geolat.max(axis=-1)\n\n    if result[\"nominal_y\"].min() >= 0:\n        warnings.warn(\"Nominal y latitude is positive definite. May be incorrect.\")\n\n    if result[\"nominal_x\"].max() > 360: 
May be incorrect.\")\n\n # -- process basin codes while we are here\n if basin_file is not None:\n if verbose:\n print(\"Using optional file for basin code specification.\")\n ds = xr.open_dataset(dictArgs[\"hgrid\"])\n result[\"basin\"] = ds.basin\n else:\n result[\"basin\"] = generate_basin_codes(result, lon=\"geolon\", lat=\"geolat\")\n result[\"basin\"] = result.basin.fillna(0.0)\n\n if output_type == \"numpy\":\n geolat = np.array(result.geolat.to_masked_array())\n geolon = np.array(result.geolon.to_masked_array())\n nominal_x = np.array(result.nominal_x.to_masked_array())\n nominal_y = np.array(result.nominal_y.to_masked_array())\n area = np.array(result.area.to_masked_array())\n basin = np.array(result.basin.to_masked_array())\n result = (geolat, geolon, nominal_x, nominal_y, area, basin)\n\n return result\n\n\ndef open_intake_catalog(platform, config):\n \"\"\"Returns an Intake catalog for a specified platform and config\n\n Uses the package resources included in the om4labs distribution.\n\n Parameters\n ----------\n platform : str\n Site description, e.g. \"gfdl\", \"orion\", \"testing\"\n config : str\n Model configuration, e.g. \"OM4p5\", \"OM4p25\"\n\n Returns\n -------\n intake.catalog.Catalog\n Intake catalog corresponding to specified platform/config\n \"\"\"\n cat_platform = f\"catalogs/{config}_catalog_{platform}.yml\"\n catfile = pkgr.resource_filename(\"om4labs\", cat_platform)\n cat = intake.open_catalog(catfile)\n return cat\n\n\ndef read_topography(dictArgs):\n \"\"\"Returns topography field based on the values of the CLI\n arguments and the presence of intake catalogs.\n\n Parameters\n ----------\n dictArgs : dict, optional\n dictionary of arguments obtained from the CLI parser, by default None\n\n Returns\n -------\n numpy.ma.maskedArray\n topography array\n \"\"\"\n\n verbose = dictArgs[\"verbose\"] if \"verbose\" in dictArgs else False\n\n if dictArgs[\"topog\"] is not None:\n if verbose:\n print(\"Using optional topg file for depth field\")\n ds = xr.open_dataset(dictArgs[\"topog\"])\n\n elif dictArgs[\"static\"] is not None:\n if verbose:\n print(\"Using optional static file for depth field.\")\n ds = xr.open_dataset(dictArgs[\"static\"])\n\n elif dictArgs[\"gridspec\"] is not None:\n if verbose:\n print(\"Using optional gridspec tar file for depth field.\")\n tar = tf.open(dictArgs[\"gridspec\"])\n ds = extract_from_tar(tar, \"ocean_topog.nc\")\n\n elif dictArgs[\"platform\"] is not None and dictArgs[\"config\"] is not None:\n if verbose:\n print(\n f\"Using {dictArgs['platform']} {dictArgs['config']} intake catalog for depth field.\"\n )\n cat = open_intake_catalog(dictArgs[\"platform\"], dictArgs[\"config\"])\n ds = cat[\"topog\"].to_dask()\n\n if \"deptho\" in list(ds.variables):\n depth = ds.deptho.to_masked_array()\n elif \"depth\" in list(ds.variables):\n depth = ds.depth.to_masked_array()\n\n depth = np.where(np.isnan(depth), 0.0, depth)\n\n return depth\n","sub_path":"om4labs/om4common.py","file_name":"om4common.py","file_ext":"py","file_size_in_byte":14748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"357307767","text":"import tensorflow as tf\n\nclass AnnModel:\n img_size_x = 28\n img_size_y = 28\n img_size_flat = img_size_x * img_size_y\n img_shape = (img_size_x, img_size_y)\n num_channels = 1\n num_classes = 10\n\n # Convolutional layer 1\n filter_size1 = 5 # Convolution filters are 5 x 5 pixels.\n num_filters1 = 16 # There are 16 of these filters.\n\n # Convolutional Layer 2.\n 
filter_size2 = 5          # Convolution filters are 5 x 5 pixels.\n    num_filters2 = 36         # There are 36 of these filters.\n\n    # Fully-connected layer.\n    fc_size = 128             # Number of neurons in fully-connected layer.\n\n    def new_weights(self, shape):\n        return tf.Variable(tf.truncated_normal(shape, stddev=0.05))\n\n    def new_biases(self, length):\n        return tf.Variable(tf.constant(0.05, shape=[length]))\n\n    def new_conv_layer(self, input_layer, num_input_channels, filter_size, num_filters):\n        \"\"\"\n        Create new convolutional layer\n        :param input_layer: previous layer\n        :param num_input_channels: number of inputs from previous layer\n        :param filter_size: size of each filter ( width and height )\n        :param num_filters: number of filters\n        :return: layer and weights\n        \"\"\"\n\n        shape = [filter_size, filter_size, num_input_channels, num_filters]\n        weights = self.new_weights(shape=shape)\n        biases = self.new_biases(length=num_filters)\n\n        layer = tf.nn.conv2d(input=input_layer, filter=weights, strides=[1, 1, 1, 1], padding='SAME')\n        layer += biases # add biases\n\n        # Rectified Linear Unit (ReLU).\n        # It calculates max(x, 0) for each input pixel x.\n        # This adds some non-linearity to the formula and allows us\n        # to learn more complicated functions.\n        layer = tf.nn.relu(layer)\n        return layer, weights\n\n    def flatten_layer(self, layer):\n        \"\"\"\n        reshape the 4-dimensional convolutional output layer to 2-dimensional input for a fully connected layer\n        input layer_shape == [num_images, img_height, img_width, num_channels]\n        The number of features is: img_height * img_width * num_channels\n        The shape of the flattened layer is now: [num_images, img_height * img_width * num_channels]\n        :param layer: 4 dimensional layer\n        :return: layer flat , number of features\n        \"\"\"\n        layer_shape = layer.get_shape()\n        num_features = layer_shape[1:4].num_elements()\n        layer_flat = tf.reshape(layer, [-1, num_features])\n        return layer_flat, num_features\n\n    def new_fc_layer(self, input_layer, num_inputs, num_outputs, use_relu=True):\n        \"\"\"\n        create a fully connected layer\n        :param input_layer: previous layer\n        :param num_inputs: number of inputs from previous layer\n        :param num_outputs: number of outputs\n        :param use_relu: Use Rectified Linear Unit (ReLU) ( True, False)\n        :return: created layer\n        \"\"\"\n        weights = self.new_weights(shape=[num_inputs, num_outputs])\n        biases = self.new_biases(length=num_outputs)\n\n        # Calculate the layer as the matrix multiplication of\n        # the input and weights, and then add the bias-values.\n        layer = tf.matmul(input_layer, weights) + biases\n\n        # Use ReLU?\n        if use_relu:\n            layer = tf.nn.relu(layer)\n\n        return layer\n\n    def create_placeholders(self):\n        self.x = tf.placeholder(tf.float32, shape=[None, self.img_size_flat], name='x')\n        self.x_image = tf.reshape(self.x, [-1, self.img_size_x, self.img_size_y, self.num_channels])\n        self.y_true = tf.placeholder(tf.float32, shape=[None, self.num_classes], name='y_true')\n        self.y_true_cls = tf.argmax(self.y_true, dimension=1)\n\n    # ------------------- CREATE ANN MODEL --------------------\n    def create_ann(self):\n\n        # create two convolutional layers\n        layer_conv1, self.weights_conv1 = self.new_conv_layer(input_layer=self.x_image, num_input_channels=self.num_channels, filter_size=self.filter_size1, num_filters=self.num_filters1)\n        layer_conv2, self.weights_conv2 = self.new_conv_layer(input_layer=layer_conv1, num_input_channels=self.num_filters1, filter_size=self.filter_size2, num_filters=self.num_filters2)\n\n        # flatten the output of the second convolutional layer\n        layer_flat, 
num_features = self.flatten_layer(layer_conv2)\n\n        # create two fully connected layers\n        layer_fc1 = self.new_fc_layer(input_layer=layer_flat, num_inputs=num_features, num_outputs=self.fc_size, use_relu=True)\n        layer_fc2 = self.new_fc_layer(input_layer=layer_fc1, num_inputs=self.fc_size, num_outputs=self.num_classes, use_relu=False)\n\n        # ----------------- CREATE PREDICTIONS AND OPTIMIZER -----\n        self.y_pred = tf.nn.softmax(layer_fc2) # scale output of fully connected layer 2 so it sums to 1\n        self.y_pred_cls = tf.argmax(self.y_pred, dimension=1) # index of the class with the biggest value in y_pred\n        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=layer_fc2, labels=self.y_true) # calculate cross entropy with tensorflow's built-in function\n        cost = tf.reduce_mean(cross_entropy)\n        self.optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost) # Adam optimizer for the learning process\n        self.correct_prediction = tf.equal(self.y_pred_cls, self.y_true_cls) # correct_prediction is a vector of booleans; it is True where the prediction matches the label\n        self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32)) # tf.cast casts True to 1 and False to 0; tf.reduce_mean calculates the average\n\n        return self.optimizer, self.weights_conv1, self.weights_conv2, self.y_pred_cls, self.accuracy\n\n","sub_path":"recognition/annmodel.py","file_name":"annmodel.py","file_ext":"py","file_size_in_byte":5683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"35416811","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Sep 24 16:29:45 2018\r\n\r\n@author: Nagadeepa\r\n\"\"\"\r\nimport Single_LL as LL\r\n\r\ndef delete(lis,value):\r\n    deleted = False\r\n    if(lis.isEmpty()):\r\n        print(\"List is Empty\")\r\n        return deleted\r\n    \r\n    current = lis.getHead().next\r\n    prev = lis.getHead()\r\n    # Traversing the list\r\n    while current:  # also test the tail node; 'while current.next' skipped the last element\r\n        if (current.data == value):\r\n            prev.next = current.next\r\n            deleted = True\r\n            break\r\n        \r\n        prev = current\r\n        current = current.next\r\n    \r\n    \r\n    return deleted\r\n#---------------------------------------------------------------------------#\r\n \r\nli = LL.LinkedList()\r\ndata_list = input('Please enter the elements in the linked list: ').split()\r\nfor data in data_list:\r\n    li.insertAtHead((data))\r\nd = input(\"Please enter the number to be deleted : \")\r\n\r\nli.printList()\r\nif (delete(li, d)):\r\n    print ( d , \" Deleted !\")\r\nelse:\r\n    print (d ,\" is not in List !\")\r\nli.printList()\r\n","sub_path":"Week_04_LinkedLists/6_delete.py","file_name":"6_delete.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"73090606","text":"from django.shortcuts import render\nfrom apps.index.models import Category, Banner, ShopImage, SubCate\nfrom django.views.decorators.cache import cache_page\n\n\n# Create your views here.\n@cache_page(60 * 15)\ndef index(request):\n    banners = Banner.objects.all()\n    category_list = Category.objects.all()\n    sub_cate_es = SubCate.objects.all()\n    for cate in category_list:\n        cate.sub_cate_list = cate.subcate_set.all()\n    for sub_cate in sub_cate_es:\n        sub_cate.shops = sub_cate.shop_set.all().values('shop_id', 'name', 'promote_price')\n        for shop in sub_cate.shops:\n            shop.update(img=ShopImage.objects.filter(shop_id=shop.get('shop_id')).first())\n    return render(request, 'index.html', {'category_list': category_list, 'sub_cate_es': 
sub_cate_es})\n","sub_path":"apps/index/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"9145164","text":"'''\n#Ejercicio 1\ndef sleep_in(weekday, vacation):\n if not weekday or vacation:\n return True\n else:\n return False\n\n\nprint(sleep_in(False, False) )\nprint(sleep_in(True, False) ) \nprint(sleep_in(False, True) )\n\nprint(\"-------------------------------\")\n#Ejercicio 2\ndef monkey_trouble(a_smile, b_smile):\n if a_smile and b_smile:\n return True\n if not a_smile and not b_smile:\n return True\n return False\n\nprint(monkey_trouble(True, True) )\nprint(monkey_trouble(False, False) )\nprint(monkey_trouble(True, False) )\n\nprint(\"-------------------------------\")\n#Ejercicio 3\ndef sum_double(a, b):\n # Store the sum in a local variable\n sum = a + b\n \n # Double it if a and b are the same\n if a == b:\n sum = sum * 2\n return sum\nprint(sum_double(1, 2) )\nprint(sum_double(3, 2) ) \nprint(sum_double(2, 2) )\n\nprint(\"-------------------------------\")\n#Ejercicio 4\n\ndef not_string(str):\n if len(str) >= 3 and str[:3] == \"not\":\n return str\n return \"not \" + str\n\nprint(not_string('candy') )\nprint(not_string('x') )\nprint(not_string('not bad') )\n\nprint(\"-------------------------------\")\n#Ejercicio 5\ndef missing_char(str, n):\n front = str[:n] # up to but not including n\n back = str[n+1:] # n+1 through end of string\n return front + back\n\nprint(missing_char('kitten', 1) )\nprint(missing_char('kitten', 0) )\nprint(missing_char('kitten', 4) )\n\nprint(\"-------------------------------\")\n#Ejercicio 6\ndef front3(str):\n # Figure the end of the front\n front_end = 3\n if len(str) < front_end:\n front_end = len(str)\n front = str[:front_end]\n return front + front + front \n\nprint(front3('Java') )\nprint(front3('Chocolate') ) \nprint(front3('abc') )\n\nprint(\"-------------------------------\")\n#Ejercicio 7\ndef string_times(str, n):\n result = \"\"\n for i in range(n): # range(n) is [0, 1, 2, .... n-1]\n result = result + str # could use += here\n return result\n\nprint(string_times('Hi', 2) )\nprint(string_times('Hi', 3) )\nprint(string_times('Hi', 1) )\n\nprint(\"-------------------------------\")\n#Ejercicio 8\ndef front_times(str, n):\n front_len = 3\n if front_len > len(str):\n front_len = len(str)\n front = str[:front_len]\n \n result = \"\"\n for i in range(n):\n result = result + front\n return result\n\nprint(front_times('Chocolate', 2) )\nprint(front_times('Chocolate', 3) )\nprint(front_times('Abc', 3) )\n\nprint(\"-------------------------------\")\n#Ejercicio 9\ndef string_bits(str):\n result = \"\"\n # Many ways to do this. 
This uses the standard loop of i on every char,\n # and inside the loop skips the odd index values.\n for i in range(len(str)):\n if i % 2 == 0:\n result = result + str[i]\n return result\n\nprint(string_bits('Hello') )\nprint(string_bits('Hi') )\nprint(string_bits('Heeololeo') )\n'''\n\nprint(\"-------------------------------\")\n#Ejercicio 10\ndef string_match(a,b):\n\tshorter = min(len(a), len(b))\n\tcount = 0\n\tfor i in range(shorter-1):\n\t\ta_sub = a[i:i+2]\n\t\tb_sub = b[i:i+2]\n\t\tif a_sub == b_sub:\n\t\t\tcount = count + 1\n\treturn count\ni=input()\nj=input()\nprint(string_match(i,j))\nprint(string_match('xxcaazz','xxbaaz'))\nprint(string_match('abc','abc'))\nprint(string_match('abc','axc'))\n\n\n\nprint(\"-------------------------------\")\n#Ejercicio 11\n\ndef lone_sum(a,b,c):\n\tif a==b==c:\n\t\treturn 0\n\tif a==b:\n\t\treturn c\n\tif b==c:\n\t\treturn a\n\tif a==c:\n\t\treturn b\n\treturn a+b+c\n\na=int(input(\"A? : \"))\nb=int(input(\"B? : \"))\nc=int(input(\"C? : \"))\nprint(lone_sum(a,b,c))\nprint(lone_sum(1,2,3))\nprint(lone_sum(3,2,3))\nprint(lone_sum(3,3,3))\n","sub_path":"p5.py","file_name":"p5.py","file_ext":"py","file_size_in_byte":3483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"89781727","text":"fname = input(\"Enter file name: \")\nif len(fname) < 1 : fname = \"mbox-short.txt\"\nfh = open(fname)\ncount = {};lst=[]\nfor line in fh:\n if not line.startswith('From '): continue\n words=line.split()\n hour=words[5][0:2]\n count[hour]=count.get(hour,0)+1\n\nfor k,v in count.items():\n lst.append((k,v))\nlst.sort()\nfor item in lst:\n print(item[0],item[1])\n","sub_path":"course2/10_2.py","file_name":"10_2.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"368489233","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport MeCab\nimport fileinput\n\nif __name__ == '__main__':\n s = \"すもももももももものうち\"\n parser = MeCab.Tagger()\n\n for m in parser.parse(s).split(\"\\n\"):\n print(m.split(\"\\t\")[0])\n","sub_path":"03/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"376386929","text":"import typing\nimport abc\nimport numpy as np\n\nfrom algos.calc import calc_filtered\nfrom algos.scheme import Structure, Chapter, Node, enum\n\n\nclass Filterer(metaclass=abc.ABCMeta):\n @abc.abstractmethod\n def is_end(self, parent: str, child: str) -> bool:\n pass\n\n\nclass FiltererProxy(Filterer):\n def is_end(self, parent: str, child: str) -> bool:\n return True\n\n\nclass LSTMFilterer(Filterer):\n def __init__(self, fmodel: str):\n self.classifier = get_classifier(fmodel)\n\n def is_end(self, parent: str, child: str) -> bool:\n return self.classifier.predict(parent, child) != 0\n\n\nclass LSTMClassifier(metaclass=abc.ABCMeta):\n def __init__(self, fmodel: str):\n pass\n\n # @abc.abstractmethod\n # def _explain_result(self, result: float) -> int:\n # pass\n\n # @abc.abstractmethod\n # def _vectorize(self, parent: str, child: str) -> np.ndarray:\n # pass\n\n def predict(self, parent: str, child: str) -> int:\n return 1\n\n\nclass Indicator():\n def __init__(self, class_fmodel: str, filter_fmodel: str):\n self.classifier = get_classifier(class_fmodel)\n self.filterer = get_filterer(filter_fmodel)\n self.class_id = 1\n self.classified: typing.List[typing.Tuple[str, str]] = []\n\n def its_mine(self, 
parent: str, child: str) -> bool:\n        if self.classifier.predict(parent, child) == self.class_id:\n            self.classified.append((parent, child))\n            return True\n        else:\n            return False\n\n    def calc(self,\n             structure: Structure,\n             nums: typing.Dict[str, float]) -> typing.Optional[float]:\n\n        self.classified = []\n        value: typing.Optional[float] = None\n\n        structure = typing.cast(\n            typing.Dict[str, typing.Dict[str, Chapter]],\n            structure)\n        for node in structure['bs']['chapter'].nodes.values():\n            node_value = calc_filtered(node,\n                                       nums,\n                                       self.filterer.is_end,\n                                       self.its_mine)\n            if node_value and value:\n                value += node_value\n            if node_value and not value:\n                value = node_value\n\n        return value\n\n\ndef get_classifier(fmodel: str) -> LSTMClassifier:\n    return LSTMClassifier(fmodel=fmodel)\n\n\ndef get_filterer(fmodel: str) -> Filterer:\n    if fmodel == 'proxy':\n        return FiltererProxy()\n\n    return LSTMFilterer(fmodel)\n\n\nif __name__ == \"__main__\":\n    pass\n","sub_path":"test3.py","file_name":"test3.py","file_ext":"py","file_size_in_byte":2533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"445331108","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport sys\n\nm = int(sys.stdin.readline())\nm = m/1000\nif 0.1>m:\n    VV = 0\nelif 0.1<=m<=5:\n    VV = m*10\nelif 6<=m<=30:\n    VV = m+50\nelif 35<=m<=70:\n    VV = ((m-30)/5)+80\nelif 70<=m:\n    VV = 89\n\nprint('%02d'%VV)\n","sub_path":"atcoder/ABC/001/abc01b.py","file_name":"abc01b.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"356819886","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n'''\n@author: zhaogao\n@license: (C) Copyright 2013-2017.\n@contact: 449628536@qq.com\n@software: learn-py\n@file: len74_读写字节数据.py\n@time: 22/02/2018 2:17 PM\n'''\n\n# You want to read and write binary files, such as images, sound files, etc.\n# Use the open() function with mode rb or wb to read or write binary data\n\nfile_path = '/Users/zhaogao/PycharmProjects/learn-py/cook/data/somefile.txt'\n\n# read the entire file as a single byte string\nwith open(file_path, 'rb') as f:\n    data = f.read()\n\n# write binary data to a file\nwith open(file_path, 'wb') as f:\n    f.write(b'hello world!')\n\n# Note in particular that indexing and iteration return byte values rather than byte strings\n\n# text string\nt = 'hello world'\nprint(t[0])\nfor c in t:\n    print(c)\n\n# byte string\nb = b'hello world'\nb[0]\nfor c in b:\n    print(c)\n\n# To read or write text data on a file opened in binary mode, make sure to decode and encode it\nwith open(file_path, 'rb') as f:\n    data = f.read(16)\n    text = data.decode('utf-8')\n\nwith open(file_path, 'wb') as f:\n    text = 'Hello World'\n    f.write(text.encode('utf-8'))\n\n# A lesser-known feature of binary I/O is that arrays and C structure types can be written directly, without an intermediate conversion step\nimport array\n\nnums = array.array('i', [1, 2, 3, 4])\nwith open(file_path, 'wb') as f:\n    f.write(nums)\n\na = array.array('i', [0, 0, 0, 0, 0, 0, 0, 0])\nwith open(file_path, 'rb') as f:\n    data=f.readinto(a)\n    print(data)\nprint(a)\n","sub_path":"cook/len74_读写字节数据.py","file_name":"len74_读写字节数据.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"190166205","text":"from sklearn import svm\r\nfrom sklearn.model_selection import RandomizedSearchCV\r\nimport pandas as pd\r\nfrom sklearn.preprocessing import StandardScaler\r\nimport numpy as np\r\nfrom openBCI import config as cf\r\nfrom sklearn.model_selection import train_test_split\r\n\r\ndata = pd.read_csv(cf.prepared_data_15min)\r\nX = data.drop(['0'], axis=1)\r\ny = data[['0']].values.ravel()\r\n\r\n# Splitting the dataset into the Training 
set and Test set\nX_Train, x_test, Y_Train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)\n\n# Feature Scaling\nStdScaler = StandardScaler()\nX_Train = StdScaler.fit_transform(X_Train)\n\n# Kernel types\nkernel = ['linear', 'rbf']\n# C parameter values\nC = [0.05, 0.25, 0.5, 0.75, 1, 1.5, 2, 2.5, 5, 10]\n# Gamma parameter values\ngamma = [1/4000000, 1/2000000, 1/400000, 1/200000, 1/40000, 1/20000, 1/4000, 1/2000, 1/400, 1/200]\n\n\nrandom_grid = {'C': C,\n 'kernel': kernel,\n 'gamma': gamma}\n\nclf = svm.SVC(random_state=0)\n\nrf_random = RandomizedSearchCV(estimator=clf, param_distributions=random_grid,\n n_iter=4, cv=2, verbose=10, random_state=0, n_jobs=-1)\n\n# Fit the random search model\nrf_random.fit(X_Train, Y_Train)\n\nbest_p = rf_random.best_params_\nbest_r = rf_random.best_score_\nprint(best_p, best_r)\n\ndef cv_result(name: str):\n import json\n with open(\"../CV_result/cv_SVM.txt\", \"w\") as f:\n f.write(\n 'Parameters used for Randomized grid search on ' + name + '\\'s dataset: \\nn_iter: ' + str(rf_random.n_iter) + \"\\ncv: \" + str(rf_random.cv))\n f.write('\\nBest Params: \\n')\n f.write(json.dumps(best_p))\n f.write('\\nBest Accuracy: \\n')\n f.write(json.dumps(best_r))\n f.close()\n\n\ncv_result(name='Mark')\nprint('Hyperparameters tuning for SVM is completed')","sub_path":"SVM_Classifier/SVM_CrossValidation.py","file_name":"SVM_CrossValidation.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"124723717","text":"import os\n\nimport pandas as pd\nimport re\n\n\nattributes_dict = {}\ntags = set()\nclass_dummy = set()\nclass_dummy_col_set = set()\nfile_path_set = set()\nattribute_col_name_set = set()\nfile_col_dict = {}\ncol_name_list = []\n\n\ndef main_tag(tag):\n return re.findall('<.*?>', str(tag))\n\n\ndef find_digit(tag):\n return re.findall('\\d*', tag)\n\n\ndef attributes_regex(tag):\n return re.findall(\"(?<=\\s)[\\w]*=['\\\"]+[^'\\\"]*['\\\"]+(?=>|\\s)\", str(tag))\n\n\ndef attribute_name(value):\n split_regex = re.compile(\"=['\\\"]\").split(value)\n attr_name = split_regex[0]\n return attr_name\n\n\ndef attribute_value(value):\n split_regex = re.compile(\"=['\\\"]\").split(value)\n attr_value = re.sub(\"['\\\"]\", \"\", split_regex[1])\n return attr_value\n\n\ndef no_of_parents(tag):\n return re.findall(\"'.*?,\", str(tag))\n\n\ndef binary_to_decimal(binary):\n return int(binary, 2)\n\n\ndef initial_feature_columns(file_name):\n data = pd.read_csv(file_name + \".csv\")\n df = pd.DataFrame(data)\n df = df.drop(columns='Description')\n df['Parent_Count'] = df['Dom_Content'].apply(no_of_parents).str[0].str.replace(\"'\", \"\").str.replace(\",\", \"\")\n df['Full_Content'] = df['Dom_Content'].str.split(',', 1).str[1].str.replace(\"']\", '').str.strip()\n df = df.drop(columns='Dom_Content')\n df['Char_Count'] = df['Full_Content'].str.len()\n df['Main_Tag'] = df['Full_Content'].apply(main_tag).str[0]\n df['Main_Tag_Char_Count'] = df['Main_Tag'].str.len()\n df['Main_Tag_Name'] = df['Main_Tag'].str.split().str[0].str.replace('<', '').str.replace('>', '')\n df['Main_Tag_Attributes'] = df['Main_Tag'].apply(attributes_regex)\n for index, row in df.iterrows():\n tags.add(row['Main_Tag_Name'])\n for tag in tags:\n df['Tag_Count_' + str(tag)] = df['Full_Content'].str.count('<' + str(tag) + '>|<' + str(tag) + ' ')\n df = df.drop(columns='Full_Content')\n df.to_csv(file_name[:-1] + \".csv\", index=None)\n os.remove(file_name + \".csv\")\n\n\ndef 
feature_columns(file_name):\n    data = pd.read_csv(file_name + \".csv\")\n    df = pd.DataFrame(data)\n    df['Main_Tag_Attributes'] = df['Main_Tag'].apply(attributes_regex)\n    for v in df['Main_Tag_Attributes']:\n        for val in v:\n            attribute_column_name = attribute_name(val)\n            attribute_column_value = attribute_value(val)\n            attribute_col_name_set.add('Attr_' + attribute_column_name)\n            if attribute_column_name not in attributes_dict.keys():\n                attributes_dict[attribute_column_name] = attribute_column_value\n                df['Attr_' + attribute_column_name] = \"\"\n    for i in range(0, df.shape[0]):\n        for v in df.at[i, 'Main_Tag_Attributes']:\n            attribute_column_name = attribute_name(v)\n            attribute_column_value = attribute_value(v)\n            df.at[i, 'Attr_' + attribute_column_name] = attribute_column_value\n            if attribute_column_name == 'class':\n                class_dummy.update(set(attribute_column_value.split()))\n                for dummy in class_dummy:\n                    if dummy in set(attribute_column_value.split()):\n                        df.at[i, 'Attribute_Class_' + str(dummy)] = \"1\"\n                    else:\n                        df.at[i, 'Attribute_Class_' + str(dummy)] = \"0\"\n                    df['Attribute_Class_' + str(dummy)].fillna(value=\"0\", inplace=True)\n                    class_dummy_col_set.add('Attribute_Class_' + str(dummy))\n            else:\n                col_name_list.append('Attr_' + attribute_column_name)\n    class_dummy_col_list = list(class_dummy_col_set)\n    file_path = file_name.replace(\"/\\\\\", \"\\\\\").replace(\"/\", \"\\\\\")\n    file_path_set.add(file_path)\n    file_col_dict[file_path] = list(attribute_col_name_set)\n    attribute_col_name_set.clear()\n    class_dummy.clear()\n    class_dummy_col_set.clear()\n    class_dummy_col_list.clear()\n    tags.clear()\n    df.to_csv(file_name + \".csv\", index=None)\n    dummy_df = pd.read_csv(file_name + \".csv\")\n    # de-duplicate the column names (one entry was appended per row/attribute above)\n    attr_dummy_df = pd.get_dummies(data=dummy_df, columns=list(dict.fromkeys(col_name_list)), drop_first=True)\n    attr_dummy_df.to_csv(file_name + \".csv\", index=None) #mode='w'\n    col_name_list.clear()\n\n","sub_path":"Desktop/Boa-master/passFailScenario/features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":4129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"311007705","text":"from .image import Image\nfrom .utils import BitArray\nfrom .pixel import Pixel\n\nclass ImageData():\n    def __init__(self, header, scanlines, palette=None):\n        self.header = header\n        self.raw_data = b''.join(line[1:] for line in scanlines)\n        self.scanlines = scanlines\n        \n        if palette:\n            color_type = self.header.color_type\n            self.palette = [Pixel(color_type, px) for px in palette.palette]\n        else:\n            self.palette = None\n\n        self.image = Image(header.color_type, header.width, len(scanlines))\n\n        self.rows = []\n        for y, row in enumerate(scanlines):\n            pixels = self._loadPixels(row)\n\n            image_row = ImageDataRow(row[0], pixels, header.color_type, y, self.image)\n            self.rows.append(image_row)\n\n    def show(self):\n        for row in self.rows:\n            row.computePixels()\n        self.image.show()\n\n    def save_to_bytes(self):\n        ret = b''\n        for row in self.rows:\n            raw = self._savePixels(row)\n            ret += row.filter.to_bytes(1, 'big') + raw\n\n        return ret\n\n    def __bytes__(self):\n        return self.save_to_bytes() \n\n    def _loadPixels(self, row):\n        pixels = []\n        pxLen = self.header.pixel_len\n        color_type = self.header.color_type\n\n        if self.palette:\n            pixels = [self.palette[row[i]] for i in range(1, len(row))]\n        else:\n            px = []\n            bit_depth = self.header.bit_depth\n            for val in BitArray(row[1:], bit_depth):\n                if bit_depth != 8:\n                    val = val * (2**bit_depth - 1)\n                px.append(val)\n                if len(px) == pxLen:\n                    pixels.append(Pixel(color_type, 
px))\n px = []\n\n return pixels\n\n def _savePixels(self, row_data):\n # bit_depth\n row_ret = b''\n # px_ok = 0\n # px_ko = 0\n if self.palette:\n for px in row_data.pixels:\n if px in self.palette:\n idx = self.palette.index(px)\n row_ret += idx.to_bytes(1, 'big')\n # px_ok += 1\n else:\n # print(\"[!] Missing pixel %s in palette\" % px)\n # px_ko += 1\n row_ret += px[0].to_bytes(1, 'big')\n # print(\"[!] ok:%d ko:%d sum:%d\" % (px_ok, px_ko, px_ok+px_ko))\n\n else:\n for px in row_data.pixels:\n row_ret += px.to_bytes()\n\n return row_ret\n\n\nclass ImageDataRow():\n def __init__(self, filter_, pixels, color_type, y, image):\n self.filter = filter_\n self.pixels = pixels\n self.y = y\n self.color_type = color_type\n self.image = image\n\n def update_filter(self, new_filter:int):\n self.computePixels()\n\n if self.filter == new_filter:\n return\n\n new_pixels = []\n self.filter = new_filter\n for x, _ in enumerate(self.pixels):\n pixel = self._setpixelfilter(x, self.y, self.image)\n new_pixels.append(pixel)\n\n self.pixels = new_pixels\n\n\n def computePixels(self):\n for x, px in enumerate(self.pixels):\n pixel = self._getpixelfilter(x, self.y, px, self.image)\n self.image.putpixel((x, self.y), pixel)\n\n def _getpixelSafe(self, image, x, y):\n try:\n return image.getpixel((x, y))\n except IndexError:\n return Pixel(self.color_type)\n\n def _getpixelfilter(self, x, y, pixel, image):\n leftPixel = self._getpixelSafe(image, x - 1, y)\n upPixel = self._getpixelSafe(image, x, y - 1)\n cornerPixel = self._getpixelSafe(image, x - 1, y - 1)\n\n if self.filter == 1:\n pixel = pixel + leftPixel\n\n elif self.filter == 2:\n pixel = pixel + upPixel\n\n elif self.filter == 3:\n pixel = pixel.addMean(leftPixel, upPixel)\n\n elif self.filter == 4:\n bestPixel = Pixel.peath(leftPixel, upPixel, cornerPixel)\n pixel = pixel + bestPixel\n\n return pixel\n\n def _setpixelfilter(self, x, y, image):\n leftPixel = self._getpixelSafe(image, x - 1, y)\n upPixel = self._getpixelSafe(image, x, y - 1)\n cornerPixel = self._getpixelSafe(image, x - 1, y - 1)\n pixel = self._getpixelSafe(image, x, y)\n\n ret = pixel\n if self.filter == 1:\n ret = pixel - leftPixel\n\n elif self.filter == 2:\n ret = pixel - upPixel\n\n elif self.filter == 3:\n ret = pixel.subMean(leftPixel, upPixel)\n\n elif self.filter == 4:\n bestPixel = Pixel.peath(leftPixel, upPixel, cornerPixel)\n ret = pixel - bestPixel\n\n return ret\n","sub_path":"pngparser/imagedata.py","file_name":"imagedata.py","file_ext":"py","file_size_in_byte":4753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"185587577","text":"import Image\nimport ImageDraw\nimport ImageFont\nfrom time import *\nimport Adafruit_ILI9341 as TFT\nimport Adafruit_GPIO as GPIO\nimport Adafruit_GPIO.SPI as SPI\n\n\n# Raspberry Pi configuration.\nDC = 18#12 #18\nRST = 23#16 #23\nSPI_PORT = 0\nSPI_DEVICE = 0\n\n#variables\nnumber='1234567890'\nchars='aAbBcCdD'\n\n\n# Create TFT LCD display class.\ndisp = TFT.ILI9341(DC, rst=RST, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE, max_speed_hz=64000000))\n\n# Initialize display.\ndisp.begin()\ndisp.clear()\ndraw = disp.draw()\n\n#colors\ncLabel=(100,200,0)\ncText1=(180,180,200)\n\nyPos=5\nyStep=25\n\n# Load default font.\nfont1 = ImageFont.load_default()\nfont2 = ImageFont.truetype('fonts/Open.24.Display.St.ttf',24) #14\nfont3 = ImageFont.truetype('fonts/nk57-monospace-no-rg.ttf',16);#14 working\n#not working 
draw.setFontSize(10)\n\ndraw.text((5,yPos),chars,font=font1,fill=cLabel);\nyPos+=20;\ndraw.text((5,yPos),chars,font=font2,fill=cLabel);\nyPos+=40;\ndraw.text((5,yPos),number,font=font3,fill=cLabel);\ndisp.display();\n","sub_path":"python/tft_demos/demo_fonts.py","file_name":"demo_fonts.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"431410320","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 9 17:05:41 2016\n\n@author: qitlab\n\"\"\"\nimport power_calibration_up\nfrom power_calibration_up import power_scan\nimport imp\nimport matplotlib.pylab as plt\nimport numpy as np\nimp.reload(power_calibration_up)\nstart = 150\nstop = 201\nstep = 1\n\nfreq_range = np.arange(start,stop,step)\n#cal_data=np.genfromtxt('up_1.txt')\ncal_data=np.genfromtxt('aftfb_corrected_down_2.txt')\n# set starting power make it low to ensure have enough adjustment when far from AOM resonance\nre=power_scan(freq_range,target_power=0.06/1000,cf=True,cal_data=[],initset_power=50,k1=5e5)\npowers_adj=[]\npowers = re[1]#run scan should take about a minute with 10 averages\npowers_adj = re[0]\nsetpower= re[2]\nsetpower_track=re[3]\npowers_track=re[4]\nplt.plot(freq_range,powers,'-o')#plot data to make sure it makes sense\nplt.plot(freq_range,powers_adj,'-o',color='red')#plot data to make sure it makes sense\n\nplt.xlabel('Frequency (MHz)')\nplt.ylabel('Power (mW)')\nplt.show()\nfig2=plt.figure()\nplt.plot(freq_range,setpower,'-o',color='black')#plot data to make sure it makes sense1\nplt.show()","sub_path":"power_scan_script.py","file_name":"power_scan_script.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"292384653","text":"# author:Prathamesh Deogharkar\r\nimport threading\r\nfrom database_config import init_db\r\nfrom detect_face import detectentry\r\n# from delete_face import delete_embedding\r\nimport pickle\r\nimport os\r\nfrom services import create_user_dataset,delete_embedding,GenerateDeviceID,auth_deviceID,getLanding\r\nimport socket\r\nfrom flask import Flask, render_template, Response, request, redirect, url_for, jsonify\r\nfrom flask_jsglue import JSGlue\r\nimport cv2\r\nimport numpy as np\r\nfrom train_model import train_embedding\r\nimport logging\r\n\r\napp = Flask(__name__)\r\nJSGlue(app)\r\n\r\nhostname = socket.gethostname()\r\nIPAddr = socket.gethostbyname(hostname)\r\npath = os.path.dirname(os.path.realpath(__file__))\r\nrecognizer = pickle.loads(open(path + \"/recognizer.pickle\", \"rb\").read())\r\nle = pickle.loads(open(path + \"/le.pickle\", \"rb\").read())\r\nlelock = threading.Lock()\r\ndataLock = threading.Lock()\r\n\r\nprint(\"Recognizer============>>\",recognizer)\r\nprint(\"Le===================>>\",le)\r\n\r\n@app.route('/')\r\ndef hello_world():\r\n return (\"Hello World\")\r\n\r\n\r\n@app.route('/register')\r\ndef register_user():\r\n # return render_template(\"Registration_del.html\")\r\n return render_template(\"Registration.html\")\r\n\r\n@app.route('/recognize',methods=[\"POST\"])\r\ndef recognize_face():\r\n data=request.form.getlist(\"image\")\r\n lowAcc=request.form.get(\"low_acc\")\r\n # action= \"intime\"\r\n # print(\"Location======>>\",request.form.get(\"location\"))\r\n print(\"DeviceID=======>>\",request.form.get(\"deviceID\"))\r\n print(\"config=======>>\", request.form.get(\"config\"))\r\n if(len(data)>0):\r\n 
result=detectentry(data,recognizer,le,lelock,int(lowAcc),request.form.get(\"location\"),request.form.get(\"deviceID\"),request.form.get(\"config\"))\r\n        print(\"result=======>\",result)\r\n        return jsonify({\"result\": \"Done\",\"value\":result[0],\"name\":result[1]})\r\n    else:\r\n        return jsonify({\"result\":\"Error\"})\r\n\r\n@app.route('/detect_face_in',methods=[\"GET\",\"POST\"])\r\ndef detect_face_in():\r\n    if request.method==\"GET\":\r\n        return render_template(\"Recognize.html\",data=\"3117TS202106060243\")\r\n    else:\r\n        data=request.form.get(\"deviceID\")\r\n        loc=request.form.get(\"loc\")\r\n        # data1=request.form.get(\"lastname\")\r\n        print(data)\r\n        return render_template(\"Recognize.html\",data=data,loc=loc)\r\n\r\n@app.route('/detect_face_out',methods=[\"GET\",\"POST\"])\r\ndef detect_face_out():\r\n    if request.method==\"GET\":\r\n        return render_template(\"Recognize_out.html\",data=\"3117TS202106060243\")\r\n    else:\r\n        data=request.form.get(\"deviceID\")\r\n        loc = request.form.get(\"loc\")\r\n        comment = request.form.get(\"data\")\r\n        # data1=request.form.get(\"lastname\")\r\n        print(\"DeviceID=========>>\",data)\r\n        return render_template(\"Recognize_out.html\",data=data,loc=loc)\r\n\r\n\r\n\r\n\r\n@app.route('/train',methods=[\"GET\"])\r\ndef train():\r\n    return Response(train_embedding(recognizer, le, lelock, dataLock, \"/EmployeeDataset\"),content_type='text/event-stream')\r\n\r\n@app.route('/auth_device',methods=[\"POST\"])\r\ndef auth_device():\r\n    data=request.get_json()\r\n    print(\"Data=======>>\",data[\"deviceID\"])\r\n    if data[\"deviceID\"] != \"\":\r\n        resp=auth_deviceID(data[\"deviceID\"])\r\n        print(\"Controller value====>\",resp)\r\n        if(resp):\r\n            return jsonify({\"data\":\"true\"})\r\n        else:\r\n            return jsonify({\"data\": \"false\"})\r\n    else:\r\n        return jsonify({\"data\": \"false\"})\r\n\r\n\r\n@app.route('/gen_deviceId',methods=[\"POST\"])\r\ndef gen_DeviceId():\r\n    data=request.get_json()\r\n    print(data)\r\n    if data[\"ecn\"]==\"\" or data[\"location\"]==\"\" or data[\"name\"]==\"\":\r\n        return jsonify({\"status\":\"missing Field\"})\r\n    else:\r\n        data= GenerateDeviceID(data)\r\n        return jsonify({\"data\":data})\r\n\r\n\r\n@app.route('/create_user', methods=['POST'])\r\ndef create_user():\r\n    try:\r\n        data = request.form.getlist('image')\r\n        ecn = request.form.get('ecn')\r\n        lable = request.form.get('number')\r\n        return jsonify(create_user_dataset(data, lable, ecn))\r\n    except Exception as ex:\r\n        logging.exception(\"An exception occurred=====>>\")\r\n        return jsonify({\"result\": \"Error\", \"status\": 500})\r\n    # for a in range(10):\r\n    #     frame = cv2.imdecode(\r\n    #         np.frombuffer(base64.b64decode(data[a].split(',')[1]), dtype=np.uint8),\r\n    #         flags=cv2.IMREAD_COLOR)\r\n    #     cv2.imwrite(str(request.form.get('number')) + '.png', frame)\r\n    #     return jsonify({\"result\": \"Done\", \"status\": 200})\r\n\r\n@app.route('/del',methods=[\"POST\"])\r\ndef delete():\r\n    print(request.form.get(\"id\"))\r\n    if delete_embedding(request.form.get(\"id\")):\r\n        return Response(train_embedding(recognizer, le, lelock, dataLock, \"/Empty\"),content_type='text/event-stream')\r\n    else:\r\n        return jsonify({\"status\":200})\r\n\r\n@app.route('/landing',methods=[\"GET\",\"POST\"])\r\ndef landing():\r\n    print(request.args.get(\"id\"))\r\n    if not (request.args.get(\"id\")):\r\n        return jsonify({\"result\": \"Error\"})\r\n    else:\r\n        result =getLanding(request.args.get(\"id\"))\r\n        return render_template(\"Landing.html\", data= result)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n    init_db()\r\n    
app.run(debug=True)\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
{"seq_id":"210792895","text":"import tensorflow as tf\r\nfrom tflearn.layers.conv import global_avg_pool\r\nfrom tensorflow.contrib.layers import batch_norm, flatten\r\nfrom tensorflow.contrib.framework import arg_scope\r\nfrom data_deal import *\r\nimport numpy as np\r\n\r\nclass_num = 100\r\nimage_wight = 96\r\nimage_hight = 48\r\nimg_channels = 3\r\n\r\nweight_decay = 0.0005#0.0005\r\nmomentum = 0.9\r\n\r\ninit_learning_rate = 0.0006 #0.1->0.05->0.005->0.002\r\nreduction_ratio = 4 # reduction ratio\r\n\r\n\r\n#56773\r\nbatch_size = 100\r\niteration = 572 #568*100=56800>56773—+400\r\n# 128 * 391 ~ 50,000\r\n\r\n# number of iterations over the test set\r\nverify_iteration = 10\r\n# number of epochs over the whole image dataset\r\ntotal_epochs = 100 #100*4=400\r\n\r\n# number of iterations over the test/predict set\r\ntest_iteration = 10\r\n# number of epochs over the whole image dataset\r\ntest_total_epochs = 100 #100*10=1000\r\n\r\n# write the prediction results from the dict into the test .csv file\r\ndef writeToCsv(test_name,predict_label,epoch,pre_index, file_path=TEST_SAVE_PATH):\r\n    predict = predict_label.reshape(-1, 1)\r\n    file_path1 = file_path + 'test_9_'+'_epoch_' +str(epoch)+'_'+ str(pre_index) + '.csv'\r\n    try:\r\n        with open(file_path1, 'a', newline='')as f:\r\n            for i in range(predict.shape[0]):\r\n                # if predict[i] == 99:\r\n                #     f.write(test_name[i] + ' ' + str(1) + '\\n')\r\n                # else:\r\n                f.write(test_name[i] + ' ' + str(predict[i, :][0] + 1) + '\\n')\r\n    except Exception as error:\r\n        print(error)\r\n\r\ndef data_label_load(data_name,data_label,data_type = 'train'):# train or test\r\n    X_name, Y_label = data_name, data_label\r\n    if data_type == 'test':\r\n        data_path = TEST_IMAGE_PATH\r\n        Y_label -= 1\r\n    elif data_type == 'verify':\r\n        data_path = TEST_IMAGE_PATH\r\n        Y_label -= 1\r\n    else:\r\n        data_path = TRAIN_IMAGE_PATH\r\n    pic_label = onehot_encoding(Y_label, class_num)\r\n    pic_data = np.zeros([len(X_name), image_wight,image_hight,3])#(-1,96,48,3)\r\n    for img_index in range(len(X_name)):\r\n        img_dir = os.path.join(data_path, X_name[img_index])\r\n        img = Image.open(img_dir)#(48*96*3)\r\n        image_array = np.array(img).transpose(1,0,2)#.reshape(1,-1)##(96*48*3)\r\n        pic_data[img_index,:,:,:] = image_array\r\n    # pic_data.reshape([-1, image_wight, image_hight, 3])\r\n    # print(np.shape(pic_data),pic_data[0,20:30,0:10,1])\r\n    return pic_data, pic_label, X_name\r\n\r\ndef conv_layer(input, filter, kernel, stride=1, padding='SAME', layer_name=\"conv\"):\r\n    with tf.name_scope(layer_name):\r\n        network = tf.layers.conv2d(inputs=input, use_bias=True, filters=filter, kernel_size=kernel, strides=stride, padding=padding)\r\n        network = Relu(network)\r\n        return network\r\n\r\ndef Fully_connected(x, units=class_num, layer_name='fully_connected') :\r\n    with tf.name_scope(layer_name) :\r\n        return tf.layers.dense(inputs=x, use_bias=True, units=units)\r\n\r\ndef Relu(x):\r\n    return tf.nn.relu(x)\r\n\r\ndef Sigmoid(x):\r\n    return tf.nn.sigmoid(x)\r\n\r\ndef Global_Average_Pooling(x):\r\n    return global_avg_pool(x, name='Global_avg_pooling')\r\n\r\ndef Max_pooling(x, pool_size=[3,3], stride=2, padding='VALID') :\r\n    return tf.layers.max_pooling2d(inputs=x, pool_size=pool_size, strides=stride, padding=padding)\r\n\r\ndef Avg_pooling(x, pool_size=[3,3], stride=1, padding='SAME') :\r\n    return tf.layers.average_pooling2d(inputs=x, pool_size=pool_size, strides=stride, padding=padding)\r\n\r\ndef Batch_Normalization(x, training, scope):\r\n    # arg_scope() fills in some shared default arguments for batch_norm\r\n    with 
arg_scope([batch_norm],\r\n                   scope=scope,\r\n                   updates_collections=None,\r\n                   decay=0.9,\r\n                   center=True,\r\n                   scale=True,\r\n                   zero_debias_moving_mean=True) :\r\n        # tf.cond() works like an if/else\r\n        return tf.cond(training,\r\n                       lambda : batch_norm(inputs=x, is_training=training, reuse=None),\r\n                       lambda : batch_norm(inputs=x, is_training=training, reuse=True))\r\n\r\ndef Concatenation(layers) :\r\n    return tf.concat(layers, axis=3)\r\n\r\ndef Dropout(x, rate, training) :\r\n    return tf.layers.dropout(inputs=x, rate=rate, training=training)\r\n\r\ndef Evaluate(sess,epoch, pre_index, Evaluate_count):\r\n    Evaluate_count += 1\r\n    verify_acc = 0.0\r\n    verify_loss = 0.0\r\n    verify_pre_index = 0\r\n    add = 100 #100*10=1000\r\n    # loop over the test data in batches\r\n    for it in range(verify_iteration):\r\n        verify_batch_name = verify_name[verify_pre_index: verify_pre_index + add]\r\n        verify_batch_label = verify_label[verify_pre_index: verify_pre_index + add]\r\n        verify_batch_x, verify_batch_y, _ = data_label_load(verify_batch_name, verify_batch_label, data_type='verify')\r\n        verify_batch_x = color_preprocessing(verify_batch_x)\r\n        verify_batch_x = data_augmentation(verify_batch_x)\r\n        verify_pre_index = verify_pre_index + add\r\n        # # cyclically shift the labels by Evaluate_count positions\r\n        # print('----',Evaluate_count)\r\n        # print('++++***',np.argmax(verify_batch_y, 1),'\\n')\r\n        # tem = (np.argmax(verify_batch_y, 1)+Evaluate_count+1)%(class_num-1)\r\n        # verify_batch_y = onehot_encoding(tem,class_num)\r\n        # print('***',np.argmax(verify_batch_y, 1),'\\n')\r\n        verify_feed_dict = {\r\n            x: verify_batch_x,\r\n            label: verify_batch_y,\r\n            learning_rate: epoch_learning_rate,\r\n            training_flag: False\r\n        }\r\n\r\n        # loss_, acc_ = sess.run([cost, accuracy], feed_dict=verify_feed_dict)\r\n\r\n        loss_, pre_labels_, acc_ = sess.run([cost, labels_max_idx, accuracy], feed_dict=verify_feed_dict)\r\n\r\n        writeToCsv(verify_name, pre_labels_, epoch, pre_index)\r\n        verify_loss += loss_\r\n        verify_acc += acc_\r\n\r\n    verify_loss /= verify_iteration # average loss\r\n    verify_acc /= verify_iteration # average accuracy\r\n\r\n    # if verify_acc >0.98:\r\n    #     writeToCsv(test_name, labels_pre, verify_acc)\r\n\r\n    summary = tf.Summary(value=[tf.Summary.Value(tag='verify_loss', simple_value=verify_loss),\r\n                                tf.Summary.Value(tag='verify_accuracy', simple_value=verify_acc)])\r\n\r\n    return verify_acc, verify_loss, summary\r\n#predict_TEST_LABELS\r\n\"\"\"\r\ndef predict_test_labels(sess,epoch):\r\n    test_loss = 0.0\r\n    acc = 0.0\r\n    test_pre_index = 0\r\n    add = 100 #100*10=1000\r\n    # loop over the test data in batches\r\n    for it in range(test_iteration):\r\n        test_batch_name = test_name[test_pre_index: test_pre_index + add]\r\n        test_batch_label = test_label[test_pre_index: test_pre_index + add]\r\n        test_batch_x, test_batch_y, test_batch_name = data_label_load(test_batch_name, test_batch_label, data_type='test')\r\n        test_batch_x = color_preprocessing(test_batch_x)\r\n        test_batch_x = data_augmentation(test_batch_x)\r\n        test_pre_index = test_pre_index + add\r\n\r\n        test_feed_dict = {\r\n            x: test_batch_x,\r\n            label: test_batch_y,\r\n            learning_rate: epoch_learning_rate,\r\n            training_flag: False\r\n        }\r\n\r\n        loss_, pre_labels_ ,acc= sess.run([cost, labels_max_idx, accuracy], feed_dict=test_feed_dict)\r\n        # print('loss=',loss_)\r\n        writeToCsv(test_name, pre_labels_, epoch)\r\n        test_loss += loss_\r\n        acc += acc\r\n    test_loss /= test_iteration # average loss\r\n    acc /= test_iteration\r\n    # summary = tf.Summary(value=[tf.Summary.Value(tag='test_loss', simple_value=test_loss),\r\n    #                             tf.Summary.Value(tag='test_accuracy', 
simple_value=test_acc)])\r\n\r\n return test_loss, acc\r\n\"\"\"\r\nclass SE_Inception_v4():\r\n def __init__(self, x, training):\r\n self.training = training\r\n self.model = self.Build_SEnet(x)\r\n\r\n def Stem(self, x, scope):\r\n with tf.name_scope(scope) :\r\n x = conv_layer(x, filter=32, kernel=[3,3], stride=2, padding='VALID', layer_name=scope+'_conv1')\r\n x = conv_layer(x, filter=32, kernel=[3,3], padding='VALID', layer_name=scope+'_conv2')\r\n block_1 = conv_layer(x, filter=64, kernel=[3,3], layer_name=scope+'_conv3')\r\n\r\n split_max_x = Max_pooling(block_1)\r\n split_conv_x = conv_layer(block_1, filter=96, kernel=[3,3], stride=2, padding='VALID', layer_name=scope+'_split_conv1')\r\n x = Concatenation([split_max_x,split_conv_x])\r\n\r\n split_conv_x1 = conv_layer(x, filter=64, kernel=[1,1], layer_name=scope+'_split_conv2')\r\n split_conv_x1 = conv_layer(split_conv_x1, filter=96, kernel=[3,3], padding='VALID', layer_name=scope+'_split_conv3')\r\n\r\n split_conv_x2 = conv_layer(x, filter=64, kernel=[1,1], layer_name=scope+'_split_conv4')\r\n split_conv_x2 = conv_layer(split_conv_x2, filter=64, kernel=[7,1], layer_name=scope+'_split_conv5')\r\n split_conv_x2 = conv_layer(split_conv_x2, filter=64, kernel=[1,7], layer_name=scope+'_split_conv6')\r\n split_conv_x2 = conv_layer(split_conv_x2, filter=96, kernel=[3,3], padding='VALID', layer_name=scope+'_split_conv7')\r\n\r\n x = Concatenation([split_conv_x1,split_conv_x2])\r\n\r\n split_conv_x = conv_layer(x, filter=192, kernel=[3,3], stride=2, padding='VALID', layer_name=scope+'_split_conv8')\r\n split_max_x = Max_pooling(x)\r\n\r\n x = Concatenation([split_conv_x, split_max_x])\r\n\r\n x = Batch_Normalization(x, training=self.training, scope=scope+'_batch1')\r\n x = Relu(x)\r\n\r\n return x\r\n\r\n def Inception_A(self, x, scope):\r\n with tf.name_scope(scope) :\r\n split_conv_x1 = Avg_pooling(x)\r\n split_conv_x1 = conv_layer(split_conv_x1, filter=96, kernel=[1,1], layer_name=scope+'_split_conv1')\r\n\r\n split_conv_x2 = conv_layer(x, filter=96, kernel=[1,1], layer_name=scope+'_split_conv2')\r\n\r\n split_conv_x3 = conv_layer(x, filter=64, kernel=[1,1], layer_name=scope+'_split_conv3')\r\n split_conv_x3 = conv_layer(split_conv_x3, filter=96, kernel=[3,3], layer_name=scope+'_split_conv4')\r\n\r\n split_conv_x4 = conv_layer(x, filter=64, kernel=[1,1], layer_name=scope+'_split_conv5')\r\n split_conv_x4 = conv_layer(split_conv_x4, filter=96, kernel=[3,3], layer_name=scope+'_split_conv6')\r\n split_conv_x4 = conv_layer(split_conv_x4, filter=96, kernel=[3,3], layer_name=scope+'_split_conv7')\r\n\r\n x = Concatenation([split_conv_x1, split_conv_x2, split_conv_x3, split_conv_x4])\r\n\r\n x = Batch_Normalization(x, training=self.training, scope=scope+'_batch1')\r\n x = Relu(x)\r\n\r\n return x\r\n\r\n def Inception_B(self, x, scope):\r\n with tf.name_scope(scope) :\r\n init = x\r\n\r\n split_conv_x1 = Avg_pooling(x)\r\n split_conv_x1 = conv_layer(split_conv_x1, filter=128, kernel=[1,1], layer_name=scope+'_split_conv1')\r\n\r\n split_conv_x2 = conv_layer(x, filter=384, kernel=[1,1], layer_name=scope+'_split_conv2')\r\n\r\n split_conv_x3 = conv_layer(x, filter=192, kernel=[1,1], layer_name=scope+'_split_conv3')\r\n split_conv_x3 = conv_layer(split_conv_x3, filter=224, kernel=[1,7], layer_name=scope+'_split_conv4')\r\n split_conv_x3 = conv_layer(split_conv_x3, filter=256, kernel=[1,7], layer_name=scope+'_split_conv5')\r\n\r\n split_conv_x4 = conv_layer(x, filter=192, kernel=[1,1], layer_name=scope+'_split_conv6')\r\n split_conv_x4 = 
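# Why Inception_B above factorizes 7x7 convolutions into 1x7 + 7x1 (a sketch,
# with c illustrative channel counts): the weight count drops from 49*c*c to
# 14*c*c, roughly a 3.5x saving per block at similar receptive field.
c = 192
full_7x7 = 7 * 7 * c * c        # 1,806,336 weights
factorized = (7 + 7) * c * c    #   516,096 weights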
conv_layer(split_conv_x4, filter=192, kernel=[1,7], layer_name=scope+'_split_conv7')\r\n split_conv_x4 = conv_layer(split_conv_x4, filter=224, kernel=[7,1], layer_name=scope+'_split_conv8')\r\n split_conv_x4 = conv_layer(split_conv_x4, filter=224, kernel=[1,7], layer_name=scope+'_split_conv9')\r\n split_conv_x4 = conv_layer(split_conv_x4, filter=256, kernel=[7,1], layer_name=scope+'_split_connv10')\r\n\r\n x = Concatenation([split_conv_x1, split_conv_x2, split_conv_x3, split_conv_x4])\r\n\r\n x = Batch_Normalization(x, training=self.training, scope=scope+'_batch1')\r\n x = Relu(x)\r\n\r\n return x\r\n\r\n def Inception_C(self, x, scope):\r\n with tf.name_scope(scope) :\r\n split_conv_x1 = Avg_pooling(x)\r\n split_conv_x1 = conv_layer(split_conv_x1, filter=256, kernel=[1,1], layer_name=scope+'_split_conv1')\r\n\r\n split_conv_x2 = conv_layer(x, filter=256, kernel=[1,1], layer_name=scope+'_split_conv2')\r\n\r\n split_conv_x3 = conv_layer(x, filter=384, kernel=[1,1], layer_name=scope+'_split_conv3')\r\n split_conv_x3_1 = conv_layer(split_conv_x3, filter=256, kernel=[1,3], layer_name=scope+'_split_conv4')\r\n split_conv_x3_2 = conv_layer(split_conv_x3, filter=256, kernel=[3,1], layer_name=scope+'_split_conv5')\r\n\r\n split_conv_x4 = conv_layer(x, filter=384, kernel=[1,1], layer_name=scope+'_split_conv6')\r\n split_conv_x4 = conv_layer(split_conv_x4, filter=448, kernel=[1,3], layer_name=scope+'_split_conv7')\r\n split_conv_x4 = conv_layer(split_conv_x4, filter=512, kernel=[3,1], layer_name=scope+'_split_conv8')\r\n split_conv_x4_1 = conv_layer(split_conv_x4, filter=256, kernel=[3,1], layer_name=scope+'_split_conv9')\r\n split_conv_x4_2 = conv_layer(split_conv_x4, filter=256, kernel=[1,3], layer_name=scope+'_split_conv10')\r\n\r\n x = Concatenation([split_conv_x1, split_conv_x2, split_conv_x3_1, split_conv_x3_2, split_conv_x4_1, split_conv_x4_2])\r\n\r\n x = Batch_Normalization(x, training=self.training, scope=scope+'_batch1')\r\n x = Relu(x)\r\n\r\n return x\r\n\r\n def Reduction_A(self, x, scope):\r\n with tf.name_scope(scope) :\r\n k = 256\r\n l = 256\r\n m = 384\r\n n = 384\r\n\r\n split_max_x = Max_pooling(x)\r\n\r\n split_conv_x1 = conv_layer(x, filter=n, kernel=[3,3], stride=2, padding='VALID', layer_name=scope+'_split_conv1')\r\n\r\n split_conv_x2 = conv_layer(x, filter=k, kernel=[1,1], layer_name=scope+'_split_conv2')\r\n split_conv_x2 = conv_layer(split_conv_x2, filter=l, kernel=[3,3], layer_name=scope+'_split_conv3')\r\n split_conv_x2 = conv_layer(split_conv_x2, filter=m, kernel=[3,3], stride=2, padding='VALID', layer_name=scope+'_split_conv4')\r\n\r\n x = Concatenation([split_max_x, split_conv_x1, split_conv_x2])\r\n\r\n x = Batch_Normalization(x, training=self.training, scope=scope+'_batch1')\r\n x = Relu(x)\r\n\r\n return x\r\n\r\n def Reduction_B(self, x, scope):\r\n with tf.name_scope(scope) :\r\n split_max_x = Max_pooling(x)\r\n\r\n split_conv_x1 = conv_layer(x, filter=256, kernel=[1,1], layer_name=scope+'_split_conv1')\r\n split_conv_x1 = conv_layer(split_conv_x1, filter=384, kernel=[3,3], stride=2, padding='VALID', layer_name=scope+'_split_conv2')\r\n\r\n split_conv_x2 = conv_layer(x, filter=256, kernel=[1,1], layer_name=scope+'_split_conv3')\r\n split_conv_x2 = conv_layer(split_conv_x2, filter=288, kernel=[3,3], stride=2, padding='VALID', layer_name=scope+'_split_conv4')\r\n\r\n split_conv_x3 = conv_layer(x, filter=256, kernel=[1,1], layer_name=scope+'_split_conv5')\r\n split_conv_x3 = conv_layer(split_conv_x3, filter=288, kernel=[3,3], layer_name=scope+'_split_conv6')\r\n 
split_conv_x3 = conv_layer(split_conv_x3, filter=320, kernel=[3,3], stride=2, padding='VALID', layer_name=scope+'_split_conv7')\r\n\r\n            x = Concatenation([split_max_x, split_conv_x1, split_conv_x2, split_conv_x3])\r\n\r\n            x = Batch_Normalization(x, training=self.training, scope=scope+'_batch1')\r\n            x = Relu(x)\r\n\r\n            return x\r\n\r\n    def Squeeze_excitation_layer(self, input_x, out_dim, ratio, layer_name):\r\n        with tf.name_scope(layer_name) :\r\n            squeeze = Global_Average_Pooling(input_x)\r\n\r\n            excitation = Fully_connected(squeeze, units=out_dim // ratio, layer_name=layer_name+'_fully_connected1')  # integer division: units must be an int\r\n            excitation = Relu(excitation)\r\n            excitation = Fully_connected(excitation, units=out_dim, layer_name=layer_name+'_fully_connected2')\r\n            excitation = Sigmoid(excitation)\r\n\r\n            excitation = tf.reshape(excitation, [-1,1,1,out_dim])\r\n\r\n            scale = input_x * excitation\r\n\r\n            return scale\r\n\r\n    def Build_SEnet(self, input_x):\r\n        # [0,0],[0,0],[24,24],[0,0],(96*48)->(96*96),baidu architecture\r\n        input_x = tf.pad(input_x, [[0, 0], [0, 0], [24, 24], [0, 0]])\r\n\r\n        x = self.Stem(input_x, scope='stem')\r\n\r\n        for i in range(4) :\r\n            x = self.Inception_A(x, scope='Inception_A'+str(i))\r\n            channel = int(np.shape(x)[-1])\r\n            x = self.Squeeze_excitation_layer(x, out_dim=channel, ratio=reduction_ratio, layer_name='SE_A'+str(i))\r\n\r\n        x = self.Reduction_A(x, scope='Reduction_A')\r\n\r\n        for i in range(7) :\r\n            x = self.Inception_B(x, scope='Inception_B'+str(i))\r\n            channel = int(np.shape(x)[-1])\r\n            x = self.Squeeze_excitation_layer(x, out_dim=channel, ratio=reduction_ratio, layer_name='SE_B'+str(i))\r\n\r\n        x = self.Reduction_B(x, scope='Reduction_B')\r\n\r\n        for i in range(3) :\r\n            x = self.Inception_C(x, scope='Inception_C'+str(i))\r\n            channel = int(np.shape(x)[-1])\r\n            x = self.Squeeze_excitation_layer(x, out_dim=channel, ratio=reduction_ratio, layer_name='SE_C'+str(i))\r\n\r\n        x = Global_Average_Pooling(x)\r\n        x = Dropout(x, rate=0.2, training=self.training)\r\n        x = flatten(x)\r\n\r\n        x = Fully_connected(x, layer_name='final_fully_connected')\r\n        return x\r\n\r\n\"\"\"Entry point: load the data and start training.\"\"\"\r\ntrain_name, train_label, verify_name, verify_label, test_name, test_label = prepare_data()\r\n\r\nx = tf.placeholder(tf.float32, shape=[None, image_wight, image_hight, img_channels])\r\nlabel = tf.placeholder(tf.float32, shape=[None, class_num])\r\n\r\ntraining_flag = tf.placeholder(tf.bool)\r\n\r\n\r\nlearning_rate = tf.placeholder(tf.float32, name='learning_rate')\r\n\r\nlogits = SE_Inception_v4(x, training=training_flag).model\r\nlabels_max_idx = tf.argmax(logits, axis=1, name='labels_max_idx')\r\n\r\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=label, logits=logits))\r\n\r\nl2_loss = tf.add_n([tf.nn.l2_loss(var) for var in tf.trainable_variables()])\r\noptimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=momentum, use_nesterov=True)\r\ntrain = optimizer.minimize(cost + l2_loss * weight_decay)\r\n\r\ncorrect_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(label, 1))\r\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\r\n\r\nsaver = tf.train.Saver(tf.global_variables())\r\n\r\nwith tf.Session() as sess :\r\n    ckpt = tf.train.get_checkpoint_state('./model')\r\n    if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):\r\n        saver.restore(sess, ckpt.model_checkpoint_path)\r\n    else:\r\n        sess.run(tf.global_variables_initializer())\r\n    summary_writer = tf.summary.FileWriter('./logs', 
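# A numpy sketch (the weights w1, w2 are illustrative, not from the model) of
# what Squeeze_excitation_layer above computes for one example:
import numpy as np
def se_rescale(feature_map, w1, w2):
    # feature_map: (H, W, C); w1: (C, C//r); w2: (C//r, C)
    squeeze = feature_map.mean(axis=(0, 1))        # global average pool -> (C,)
    hidden = np.maximum(squeeze @ w1, 0.0)         # ReLU bottleneck (C -> C/r)
    gate = 1.0 / (1.0 + np.exp(-(hidden @ w2)))    # sigmoid gate (C/r -> C)
    return feature_map * gate                      # per-channel rescale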
sess.graph)\r\n    # number of Evaluate() calls, i.e. how many times results have been cyclically shifted\r\n    Evaluate_count = 0\r\n\r\n    # learning rate that decays over training\r\n    epoch_learning_rate = init_learning_rate\r\n    for epoch in range(1, total_epochs + 1):\r\n        if epoch % 10 == 0 :# after every 10 epochs over the dataset, drop the learning rate by 10x\r\n            epoch_learning_rate = epoch_learning_rate / 10\r\n\r\n        pre_index = 0\r\n        train_acc = 0.0\r\n        train_loss = 0.0\r\n        \"\"\"\r\n        # produce prediction output\r\n        loss_test = predict_test_labels(sess, epoch)\r\n        print('test_loss: %.4f'%loss_test)# meaningless: the test labels are all 0, so this loss carries no information\r\n        \"\"\"\r\n        # training loop\r\n        for step in range(1, iteration + 1):\r\n            # cycle through the whole training set each epoch\r\n            # read the next mini-batch here\r\n            if pre_index + batch_size < 56773:  # total number of training samples\r\n                batch_train_name = train_name[pre_index: pre_index + batch_size]\r\n                batch_train_label = train_label[pre_index: pre_index + batch_size]\r\n            else:\r\n                batch_train_name = train_name[pre_index:]\r\n                batch_train_label = train_label[pre_index:]\r\n            batch_x, batch_y, _ = data_label_load(batch_train_name, batch_train_label, data_type='train')\r\n            # print(len(batch_x),len(batch_x[0]))\r\n            batch_x = color_preprocessing(batch_x)\r\n            batch_x = data_augmentation(batch_x)\r\n            # print('new', len(batch_x[0]), len(batch_x[0][0]), len(batch_x[0][0][0]))\r\n            # print(batch_x[0][0])\r\n            # feed dict for a training step\r\n            train_feed_dict = {\r\n                x: batch_x,\r\n                label: batch_y,\r\n                learning_rate: epoch_learning_rate,\r\n                training_flag: True\r\n            }\r\n            print('pre_index =',pre_index)\r\n            _, batch_loss = sess.run([train, cost], feed_dict=train_feed_dict)\r\n            print('batch_loss =',batch_loss)\r\n\r\n            pre_labels = labels_max_idx.eval(feed_dict=train_feed_dict)\r\n            batch_acc = accuracy.eval(feed_dict=train_feed_dict)\r\n            print('batch_acc =', batch_acc,'\\n')\r\n\r\n            train_loss += batch_loss\r\n            train_acc += batch_acc\r\n            if pre_index %5000 == 0:\r\n                saver.save(sess=sess, save_path='./model/Inception_v4.ckpt')\r\n\r\n            if pre_index %10000 == 0:\r\n                test_acc, test_loss, _ = Evaluate(sess, epoch, pre_index, Evaluate_count)\r\n                print(\"test_sets_loss: %.4f, test_sets_acc: %.4f \\n\" % (test_loss, test_acc))\r\n\r\n            # advance the batch pointer\r\n            pre_index += batch_size\r\n\r\n\r\n        train_loss /= iteration  # average loss\r\n        train_acc /= iteration  # average accuracy\r\n        train_summary = tf.Summary(value=[tf.Summary.Value(tag='train_loss', simple_value=train_loss),\r\n                                          tf.Summary.Value(tag='train_accuracy', simple_value=train_acc)])\r\n\r\n        test_acc, test_loss, test_summary = Evaluate(sess, epoch, pre_index, Evaluate_count)\r\n\r\n        summary_writer.add_summary(summary=train_summary, global_step=epoch)\r\n        summary_writer.add_summary(summary=test_summary, global_step=epoch)\r\n        summary_writer.flush()\r\n\r\n        line = \"epoch: %d/%d, train_loss: %.4f, train_acc: %.4f, test_loss: %.4f, test_acc: %.4f \\n\" % (\r\n            epoch, total_epochs, train_loss, train_acc, test_loss, test_acc)\r\n\r\n        # line = \"epoch: %d/%d, train_loss: %.4f, train_acc: %.4f \\n\" % (\r\n        #     epoch, total_epochs, train_loss, train_acc)\r\n        print(line)\r\n        # append to the training log\r\n        with open('logs.txt', 'a') as f:\r\n            f.write(line)\r\n        # save the trained model (checkpointed after every epoch)\r\n        saver.save(sess=sess, save_path='./model/Inception_v4.ckpt')","sub_path":"Inception_v4/SE_Inception_v4.py","file_name":"SE_Inception_v4.py","file_ext":"py","file_size_in_byte":22939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"578072395","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponse\nfrom .models import Restaurant, Dish, Review, RestaurantReview, DishReview\nfrom .forms import RestaurantForm\nfrom 
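# Worked example of the decay schedule in the training loop above (a sketch):
# the rate is divided by 10 every 10 epochs, so with init_learning_rate = 0.0006
# epochs 1-9 use 6e-4, epochs 10-19 use 6e-5, epochs 20-29 use 6e-6, and so on.
lr = 0.0006
schedule = []
for epoch in range(1, 31):
    if epoch % 10 == 0:
        lr /= 10
    schedule.append((epoch, lr))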
django.contrib.auth.decorators import login_required\n\ndef index(request):\n    return render(request, \"index.html\")\n\ndef show_restaurants(request):\n    rest = Restaurant.objects.all()\n    context = {\n        'restaurants': rest\n    }\n    return render(request, \"restaurant/show.html\", context)\n\n@login_required\ndef add_restaurant(request):\n    if request.method == 'POST':\n        form = RestaurantForm(request.POST)\n        if form.is_valid():  # is_valid must be called; the bare method object is always truthy\n            form.save()\n            return redirect('show_restaurants')\n    else:\n        form = RestaurantForm()  # only build an empty form on GET, so validation errors survive\n    context = {\n        'form': form,\n    }\n    return render(request, \"restaurant/add.html\", context)\n\n@login_required\ndef update_restaurant(request, id):\n    instance = get_object_or_404(Restaurant, pk=id)\n    form = RestaurantForm(request.POST or None, instance=instance)\n    if form.is_valid():\n        form.save()\n        return redirect('show_restaurants')\n    context = {\n        'form': form,\n        'name': instance.name,\n        'id': id,\n    }\n    return render(request, 'restaurant/update.html', context)\n\n@login_required\ndef delete_restaurant(request, id):\n    Restaurant.objects.get(pk=id).delete()\n    return redirect('show_restaurants')","sub_path":"Midterm (updated)/Restaurant/RMenu/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"194525422","text":"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport pandas as pd\nimport datetime\nfrom finlab.data import Data\nimport sys\n\n# ----------------------\n# get all strategies\n# ----------------------\nimport os\nsnames = [py for py in os.listdir('strategies') if py[-3:] == '.py' and py != '__init__.py']\nstrategies = {}\nfor s in snames:\n    print('strategies.' + s[:-3])\n    strategies[s[:-3]] = getattr(__import__('strategies.' 
+ s[:-3]), s[:-3]).strategy\n\n\n# ----------------------\n# dataframe to html\n# ----------------------\ndef generate_table(dataframe, max_rows=10):\n    \"\"\"\n    Read a dataframe in and convert it into an HTML table.\n    \"\"\"\n    table_value = []\n\n    # for each row\n    for i in range(min(len(dataframe), max_rows)):\n\n        row = []\n\n        # for each element in the row\n        for col in dataframe.columns:\n\n            # if the column name ends with '漲跌' (gain/loss), color the cell by its sign\n            if col[-2:] == '漲跌':\n                color = 'red' if dataframe.iloc[i][col] >= 0 else 'green'\n                row.append(html.Td(dataframe.iloc[i][col], style={'color': color}))\n            else:\n                row.append(html.Td(dataframe.iloc[i][col]))\n\n        table_value.append(html.Tr(row))\n\n\n    return html.Table(\n        # Table Header\n        [html.Tr([html.Th(col) for col in dataframe.columns])] +\n\n        # Table Body\n        table_value,\n        className='table table-striped',\n    )\n\n# ------------------------\n# simulation for strategy\n# ------------------------\ndef simulation(strategy, data, date):\n\n    \"\"\"\n    Given a strategy and a date, produce the list of stocks the strategy picks\n    on that day, build a dataframe of each stock's gain/loss from that date to\n    the most recent day, and return the dataframe, an equity curve and the\n    date of the most recent evening.\n    \"\"\"\n\n    # record the original date for data\n    org_date = data.date\n\n    # get the stock list on the \"date\"\n    data.date = date\n    slist = strategy(data).index\n\n    # select a subset of price\n    data.date = datetime.datetime.now().date()\n    ndays = (datetime.datetime.now().date() - date).days\n    prices = data.get('收盤價', ndays+10)\n    prices = prices[slist][date+datetime.timedelta(days=1):]\n\n    df = pd.DataFrame()\n    if not prices.empty:\n\n        # build the dataframe\n        buy_price = prices.iloc[0]\n        current_price = prices.iloc[-1]\n        yesterday_price = prices.iloc[-2]\n        today_gain = (prices.iloc[-1] / prices.iloc[-2] - 1)*100\n        total_gain = (prices.iloc[-1] / prices.iloc[0] - 1)*100\n\n        df = pd.DataFrame({\n            '買入股價': buy_price,\n            '今日股價': current_price,\n            '昨日股價': yesterday_price,\n            '今日漲跌': today_gain,\n            '至今漲跌': total_gain,\n        })\n        df = df[['買入股價', '昨日股價', '今日股價', '今日漲跌', '至今漲跌']]\n\n        # equity curve\n        eq = (prices/prices.bfill().iloc[0]).mean(axis=1)\n        last_day = prices.index[-1]\n    else:\n        # build the dataframe\n        prices = data.get('收盤價', ndays+10)\n\n        df = pd.DataFrame({\n            '今日收盤': prices[slist].iloc[-1]\n        })\n\n        # equity curve\n        eq = pd.Series(1, index=pd.to_datetime([prices.index[-1]]))\n        last_day = prices.index[-1]\n\n    data.date = org_date\n    return df, eq, str(last_day).split()[0] + '晚上的狀況'\n\n\n\n# ----------------------\n# Dash app and stylesheets\n# ----------------------\n\napp = dash.Dash()\n\n# ----------------------\n# CSS style setting\n# ----------------------\n\n# bootstrap CSS\napp.css.append_css({\"external_url\": \"https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css\"})\n\n# Dash CSS\napp.css.append_css({\"external_url\": \"https://codepen.io/chriddyp/pen/bWLwgP.css\"})\n\n# Loading screen CSS\napp.css.append_css({\"external_url\": \"https://codepen.io/chriddyp/pen/brPBPO.css\"})\n\n# ----------------------\n# HTML layout\n# ----------------------\n\napp.layout = html.Div(children=[\n\n    # page title\n    html.H1(children='策略監控台'),\n    html.Br(),\n\n    # strategy picker\n    html.H4(children='策略名稱'),\n    dcc.Dropdown(\n        id='strategy-picker',\n        options=[{'label': name, 'value': name} for name, func in strategies.items()],\n    ),\n    html.Br(),\n\n    # date picker\n    html.H4(children='選股日期'),\n    dcc.DatePickerSingle(\n        id='date-picker',\n        min_date_allowed=datetime.datetime(2014, 8, 1),\n        max_date_allowed=datetime.datetime(2200, 1, 1),\n        initial_visible_month=datetime.datetime.now(),\n        #date=datetime.datetime.now(),\n    ),\n    html.Br(),\n    html.Br(),\n\n    # results area\n    html.Div(id='table'),\n], style={'width':'80%', 
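# A cleaner equivalent (a sketch, not part of the original) of the
# getattr(__import__('strategies.' + name), name).strategy loader above:
# importlib.import_module returns the submodule directly, so no getattr chain.
import importlib
def load_strategy(name):
    module = importlib.import_module('strategies.' + name)
    return module.strategy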
'margin':'10%'})\n\n# ----------------------\n# user interaction logic (callbacks)\n# ----------------------\n\n@app.callback(\n    # the output of this function is rendered in the 'table' div\n    dash.dependencies.Output(component_id='table', component_property='children'),\n\n    # the inputs are the strategy dropdown and the date picker\n    [dash.dependencies.Input(component_id='strategy-picker', component_property='value'),\n     dash.dependencies.Input('date-picker', 'date')]\n)\ndef update_output_div(input_value, date):\n\n    \"\"\"\n    Given the selected strategy and date, use simulation() to produce the\n    dataframe, the equity curve and the title, and render them in the page's\n    table.\n    \"\"\"\n\n    # nothing selected yet\n    if date is None or input_value is None:\n        return html.H4(children='請選擇上方的策略與日期')\n    try:\n        # start the simulation\n        print('start simulation')\n        date = datetime.datetime.strptime(date.split()[0], '%Y-%m-%d')\n        data = Data()\n\n        # run simulation() for the chosen strategy and date\n        df, eq, s = simulation(strategies[input_value], data, date.date())\n        print('end simulation')\n        df.index.name = '股票代號'\n\n        # build the html output\n        return html.Div(children=[html.H3(s), html.Br(), dcc.Graph(\n            id='example-graph-2',\n\n            # draw the equity curve\n            figure={\n                'data': [\n                    {'x': eq.index, 'y': eq, 'type': 'line', 'name': 'SF'},\n                ],\n                'layout': {\n                }\n            }\n            # use generate_table to turn the dataframe into an HTML table\n            ), generate_table(df.reset_index().round(2), len(df))\n        ])\n    except Exception:\n        errorlog = \"Unexpected error: \" + str(sys.exc_info())\n        return html.H4(children='遇到了一些問題喔!' + errorlog)\n\n\n\nif __name__ == '__main__':\n    app.run_server(debug=True, processes=1)","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":6818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"587152559","text":"site_files = [open(\"site_list_\"+str(i+1), \"w\") for i in range(10)]\n\ncompleted_site_files = [\"completed_sites_site_list_{}\".format(i+1) for i in range(6)]\n\ncompleted_set = set()\nwith open('error_log', 'r') as error_log:\n    sites_w_errors = error_log.read().split(\"\\n\")\n    completed_set |= set(sites_w_errors)\n\nfor completed_site_file in completed_site_files:\n    with open(completed_site_file, \"r\") as completed_site:\n        sites_completed = [i.split(\",\")[0] for i in completed_site.read().split(\"\\n\")]\n        completed_set |= set(sites_completed)\n\nprint(len(completed_set))\n\nwith open(\"site_list\", \"r\") as file:\n    sites = file.read().split(\"\\n\")\n    skipped = 0  # counts sites already completed (or logged as errors)\n    cnt = 0\n    for site in sites:\n        if site not in completed_set:\n            site_files[cnt].write(site+\"\\n\")\n            cnt = (cnt+1) % len(site_files)\n        else:\n            skipped += 1\n\nprint(skipped)\n\nfor i in site_files:\n    i.close()\n","sub_path":"site_lists/SplitSites.py","file_name":"SplitSites.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"86715725","text":"from django.test import TestCase\nfrom yaml_creator.models.Variable import Variable\nfrom yaml_creator.models.ModelDescriptor import ModelDescriptor\nfrom yaml_creator.models.StateVectorPosition import StateVectorPosition\nfrom django.db import transaction\n\n#class VariableTest(TestCase):\n#    def setUp(self)\n#        m1=ModelDescriptor(folder_name='default_1')\n#        m2=ModelDescriptor(folder_name='default_2')\n#        m3=ModelDescriptor(folder_name='default_3')\nfor m in ModelDescriptor.objects.all():\n    m.delete()\n\nm1=ModelDescriptor(folder_name='default_1')\nm1.save()\ny=Variable(model_id=m1,symbol='y')\ny.save()\n# the next lines should create an 
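# The split loop in SplitSites.py above is a round-robin partition; the same
# idea with itertools.cycle (a sketch, assuming the completed sites have
# already been filtered out of `sites`):
from itertools import cycle
def round_robin_write(sites, handles):
    targets = cycle(handles)
    for site in sites:
        next(targets).write(site + "\n")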
exception\n#v2=Variable(model_id=m1,symbol='y')\n#v2.save()\nx=Variable(model_id=m1,symbol='x')\nx.save()\nk=Variable(model_id=m1,symbol='k')\nk.save()\nStateVectorPosition.objects.create(var_id=x,pos_id=0)\nStateVectorPosition.objects.create(var_id=y,pos_id=1)\n# now make a query to extract the state vector for 'default_1'\n[ v.symbol for v in Variable.objects.filter( statevectorposition__var_id__model_id=\"default_1\").order_by('statevectorposition__pos_id') ]\n\n","sub_path":"prototypes/databases/django.py","file_name":"django.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"256455205","text":"# 0.3.1\r\n# * Can print every tweet!\r\n# *\r\n\r\n### This is a list of modules to import\r\nimport sys\r\nimport os\r\nimport time\r\nimport pandas as pd\r\nimport usb.core\r\nimport usb.util\r\nimport tweepy\r\nimport escpos\r\nimport escpos.printer\r\ntry:\r\n    import json\r\nexcept ImportError:\r\n    import simplejson as json\r\nfrom clint.textui import prompt, puts, colored, validators\r\nfrom xml.dom import minidom\r\nfrom escpos.printer import Usb\r\n\r\n\"\"\" Bus 001 Device 015: ID 04b8:0e03 Seiko Epson Corp. \"\"\"\r\n\r\n\r\n# choose English as the language\r\nos.environ[\"INVOICE_LANG\"] = \"en\"\r\n\r\n### These are the items that need information stored. The default is \"None\"\r\nname = None\r\ninst = None\r\nprnt = None\r\nprnted = None\r\nagain = None\r\nend = None\r\np = Usb(0x04b8, 0x0e03, 0)\r\nSearchList = ['dogs', 'climate', 'trump']\r\n\r\n\r\n\r\n### Variables that contain the user credentials to access the Twitter API\r\nACCESS_TOKEN = '3876050776-EmePZnqCbx6Bgy2DVxvgbKGC2lQF0BMhPokS2sA'\r\nACCESS_SECRET = 'iV6Rs6TyQ1Vm38FO3uq1NioVMycfBQYP2J0Im2mY1j6Ig'\r\nCONSUMER_KEY = 'Z97ABcvq569Itkg3HuNiRR1oX'\r\nCONSUMER_SECRET = '5NNxtyZ9NkaYn1ufw9IAIKumXS8SgYToqaKsEBf81UzhaNUhPC'\r\n\r\n### This sets up the use of colors in the terminal. Not necessary, but makes things look nicer.\r\nclass bcolors:\r\n    HEADER = '\\033[95m'\r\n    OKBLUE = '\\033[94m'\r\n    OKGREEN = '\\033[92m'\r\n    WARNING = '\\033[93m'\r\n    FAIL = '\\033[91m'\r\n    ENDC = '\\033[0m'\r\n    BOLD = '\\033[1m'\r\n    UNDERLINE = '\\033[4m'\r\n\r\nsys.path.insert(0, os.path.abspath('..'))\r\n\r\n\r\n### Clears screen\r\nos.system('clear')\r\n\r\nagain = \"again\"\r\n\r\nwhile True:\r\n\r\n    ### Clears screen\r\n    os.system('clear')\r\n    prnt = None\r\n    prnted = None\r\n    if again == 'again':\r\n        if __name__ == '__main__':\r\n            if name is None:\r\n                name = prompt.query(\"What's your twitter username?\")\r\n            ### Clears screen\r\n            os.system('clear')\r\n\r\n            inst_options = [{'selector':'1','prompt':'My Profile','return':'profile'},\r\n                            {'selector':'2','prompt':'User','return':'user'},\r\n                            {'selector':'3','prompt':'Term','return':'term'},\r\n                            {'selector':'4','prompt':'Location','return':'location'}]\r\n            if inst is None:\r\n                inst = prompt.options(\"What would you like to search for \" + bcolors.BOLD + bcolors.HEADER + name + bcolors.ENDC + \"?\", inst_options)\r\n\r\n\r\n            if inst == 'profile':\r\n                os.system('clear')\r\n\r\n                print(\"Attempting to find your profile. 
Please note: If your account is set to 'Private' we will not be able to access your information.\")\n time.sleep(5)\n\n elif inst == 'user':\n os.system('clear')\n profile = prompt.query(\"What user would you like to search for?\")\n\n ### Clears screen\n os.system('clear')\n print(\"Searching for \" + bcolors.WARNING + bcolors.BOLD + profile + \".\" + bcolors.ENDC)\n\n print(\"Archive Initializing; Importing tweepy\")\n\n # Import the necessary package to process data in JSON format\n\n print(\"Archive Initializing; Importing twitter API access tokens\")\n\n # Setup tweepy to authenticate with Twitter credentials:\n\n auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)\n\n print(\"Archive; Establishing connection with Twitter\")\n\n # Create the api to connect to twitter with your creadentials\n # wait_on_rate_limit= True; will make the api to automatically wait for rate limits to replenish\n # wait_on_rate_limit_notify= Ture; will make the api to print a notification when Tweepyis waiting for rate limits to replenish\n api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True, compression=True)\n\n #--------------------------------------------------------------------------\n # Twitter Query\n # This section define the query and what information is being pulled\n #--------------------------------------------------------------------------\n print(\"Archive; Search starting\")\n\n # The search term you want to find\n query = profile + \" -filter:retweets\"\n\n # Language code (follows ISO 639-1 standards)\n language = \"en\"\n\n # Calling the user_timeline function with our parameters\n results = api.lookup_users(q=query, lang=language, tweet_mode=\"extended\")\n\n print(\"Archive; Search complete\")\n\n #--------------------------------------------------------------------------\n # Search Results\n # This section provides the results from the search\n #--------------------------------------------------------------------------\n print(\"Archive; Loading results\")\n\n # foreach through all tweets pulled\n for tweet in results:\n\n users_locs = [[tweet.user.screen_name, tweet.user.location, tweet.full_text, ] for tweet in results]\n # printing the text stored inside the tweet object\n print(bcolors.HEADER + bcolors.BOLD + \"@\" + tweet.user.screen_name, bcolors.ENDC + bcolors.OKBLUE + \"Tweeted:\",bcolors.OKGREEN + tweet.full_text, \"from\", bcolors.FAIL + tweet.user.location + bcolors.ENDC)\n tweet.text = pd.DataFrame(data=users_locs,\n columns=['user', \"location\", \"text\",])\n #----------------------------------------------------------------------------------------\n ## This should prompt the user to print results, encrypt and print, or to search again.\n #----------------------------------------------------------------------------------------\n\n prnt_options = [{'selector':'1','prompt':'Print','return':'print'},\n {'selector':'2','prompt':'Search again','return':'again'},\n {'selector':'3','prompt':'End Session','return':'end'},]\n\n if prnt is None:\n prnt = prompt.options(\"What would you like to do, \" + bcolors.BOLD + bcolors.HEADER + name + bcolors.ENDC + \"?\", prnt_options)\n print(prnt)\n if prnt == 'print':\n print(\"Now printing. Please wait.\")\n # os.system('lpr Reciept.pdf')\n elif prnt == 'encry':\n print(\"Printing encrypted content. Please wait.\")\n elif prnt == 'again':\n again = again\n inst = None\n elif prnt == 'end':\n os.system('clear')\n print(\"Ending session. 
Thanks for using Archive.\")\n time.sleep(10)\n again = again\n inst = None\n name = None\n\n elif inst == 'term':\n os.system('clear')\n\n print(SearchList)\n\n term = prompt.query(\"What term would you like to search for?\")\n ##Add term to list of terms\n SearchList.append(term)\n\n print(SearchList)\n\n ### Clears screen\n os.system('clear')\n print(\"Searching for \" + bcolors.WARNING + bcolors.BOLD + term + \".\" + bcolors.ENDC)\n\n time.sleep(1.5)\n print(\"Archive Initializing; Importing tweepy\")\n\n # Import the necessary package to process data in JSON format\n\n print(\"Archive Initializing; Importing twitter API access tokens\")\n\n # Setup tweepy to authenticate with Twitter credentials:\n\n auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)\n\n print(\"Archive; Establishing connection with Twitter\")\n\n # Create the api to connect to twitter with your creadentials\n # wait_on_rate_limit= True; will make the api to automatically wait for rate limits to replenish\n # wait_on_rate_limit_notify= Ture; will make the api to print a notification when Tweepyis waiting for rate limits to replenish\n api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True, compression=True)\n\n #--------------------------------------------------------------------------\n # Twitter Query\n # This section define the query and what information is being pulled\n #--------------------------------------------------------------------------\n print(\"Archive; Search starting\")\n\n # The search term you want to find\n query = term + \" -filter:retweets\"\n\n # Language code (follows ISO 639-1 standards)\n language = \"en\"\n\n # Calling the user_timeline function with our parameters\n results = api.search(q=query, lang=language, tweet_mode=\"extended\")\n\n print(\"Archive; Search complete\")\n\n #--------------------------------------------------------------------------\n # Search Results\n # This section provides the results from the search\n #--------------------------------------------------------------------------\n print(\"Archive; Loading results\")\n\n # foreach through all tweets pulled\n for tweet in results:\n\n users_locs = [[tweet.user.screen_name, tweet.user.location, tweet.full_text, ] for tweet in results]\n p.text(\"@\" + tweet.user.screen_name + \" Tweeted: \\n\" + tweet.full_text + \" from \" + tweet.user.location + \"\\n\" + \"\\n\")\n p.cut()\n #----------------------------------------------------------------------------------------\n ## This should prompt the user to print results, encrypt and print, or to search again.\n #----------------------------------------------------------------------------------------\n\n prnt_options = [{'selector':'1','prompt':'Search again','return':'again'},\n {'selector':'2','prompt':'End Session','return':'end'},]\n\n if prnt is None:\n os.system('clear')\n prnt = prompt.options(\"Your receipt has been printed. \\nThank you for the information. \\nWhat would you like to do now, \" + bcolors.BOLD + bcolors.HEADER + name + bcolors.ENDC + \"?\", prnt_options)\n\n if prnt == 'print':\n os.system('clear')\n print(\"Now printing. 
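# A sketch (assuming the tweepy v3 API that this script already imports) of
# paging beyond the single api.search call used above with tweepy.Cursor:
def search_many(api, query, limit=100):
    import tweepy
    return [t for t in tweepy.Cursor(api.search, q=query, lang="en",
                                     tweet_mode="extended").items(limit)]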
Please wait.\")\n\n\n p = Usb(0x04b8, 0x0e03, 0)\n for tweet in results:\n escpos.set(bold=True)\n p.text(\"@\" + tweet.user.screen_name + \" Tweeted: \\n\")\n escpos.set(bold=False)\n p.text(tweet.full_text + \" from \" + tweet.user.location + \"\\n\" + \"\\n\")\n\n p.barcode(UPC-A, 12345678, height=64, width=3, pos=u'BELOW', font=u'A', align_ct=True, function_type=None, check=True)\n\n p.cut()\n\n\n elif prnt == 'encry':\n print(\"Printing encrypted content. Please wait.\")\n elif prnt == 'again':\n again = again\n inst = None\n elif prnt == 'end':\n os.system('clear')\n print(\"Ending session. Thanks for using Archive.\")\n time.sleep(10)\n again = again\n inst = None\n name = None\n\n elif inst == 'location':\n ### Clears screen\n os.system('clear')\n location = prompt.query(\"What location would you like to search for?\")\n\n ### Clears screen\n os.system('clear')\n print(\"Locating tweets from \" + location + \".\")\n","sub_path":"Archive.py","file_name":"Archive.py","file_ext":"py","file_size_in_byte":11814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"547350781","text":"from unittest.mock import patch\nimport sys\nimport warnings\nfrom itertools import chain\n\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.core.management.commands import makemigrations\n\nfrom django.apps import apps\n\nfrom django.db.migrations import Migration\nfrom django.db.migrations.autodetector import MigrationAutodetector\nfrom django.db.migrations.loader import MigrationLoader\nfrom django.db.migrations.operations import CreateModel\nfrom django.db.migrations.state import ProjectState\nfrom django.utils.deprecation import RemovedInDjango20Warning\n\nfrom ...base import CombinedModelViewBase, CombinedModelView\nfrom ...operations import CreateCombinedView, RemoveCombinedView\n\n\n#Combined view gathering for custom migrations\ndef combined_models_of_app(appconfig):\n return [ m for m in appconfig.get_models() if issubclass(m, CombinedModelView) ]\n\n\ndef gather_combined_models(labels = None):\n if labels is None:\n configs = apps.get_app_configs()\n else:\n configs = [ apps.get_app_config(label) for label in labels ]\n model_iter = chain.from_iterable(combined_models_of_app(config) for config in configs)\n return { (model._meta.app_label, model._meta.model_name) : model for model in model_iter }\n\n\nclass CombinedViewCheckingCreateModel(CreateModel):\n def references_model(self, name, app_label=None):\n if any(isinstance(base, CombinedModelViewBase) for base in self.bases):\n return False\n return super(CreateModel, self).references_model(name, app_label)\n\n\nclass Command(makemigrations.Command):\n help = 'Runs makemigrations with some injected code that causes makemigrations to ignore combined model views, ' \\\n 'then adds the combined model views separately.'\n\n\n \"\"\"To avoid the risk of overloading names from the makemigrations.Command class, I've prefixed all attributes\n I assign to this Command class, or an instance thereof, with '_mcv_' (short for 'Make Combined Views'). 
This\n convention makes it unlikely that my names will ever collide with Django's, even if updates are made to the\n Django makemigrations script.\"\"\"\n def _mcv_step2_init(self, *app_labels, **options):\n self._mcv_loader = MigrationLoader(None)\n self._mcv_combined_model_nodes = {tup_id: migration\n for tup_id, migration in self._mcv_loader.graph.nodes.items()\n if any([type(op) == CreateCombinedView for op in migration.operations])}\n self._mcv_current_combined_models = gather_combined_models()\n self._mcv_current_state = self._mcv_loader.project_state()\n self._mcv_latest_combined_model_state = self._mcv_loader.project_state(nodes=list(self._mcv_combined_model_nodes.keys()))\n\n def handle(self, *app_labels, **options):\n\n \"\"\"A filthy hack. Can't be helped so far as I can tell because Django makes the assumption\n that, other than /the class Model/, all classes whose metaclasses inherit from ModelBase are user-defined\n models that need to have normal migration logic applied to them. This usually doesn't matter, since\n CombinedModelViews are forced to be unmanaged (see combine.base.CombinedModelViewBase). However,\n the CreateModel operation is applied even to unmanaged models. So we have to monkey patch it.\"\"\"\n with patch('django.db.migrations.operations.CreateModel',\n new=CombinedViewCheckingCreateModel):\n super(Command, self).handle(*app_labels, **options)\n\n #TODO: Handle wacky makemigrations options such as --dry-run.\n\n self._mcv_step2_init(*app_labels, **options)\n\n models_to_add, models_to_remove = self._mcv_get_combined_model_additions_and_removals()\n app_labels_to_use = set([label_and_model[0] for label_and_model in models_to_add]).union(\n set([label_and_model[0] for label_and_model in models_to_remove]))\n operations = { label: [] for label in app_labels_to_use }\n for app_label, model_name in models_to_add:\n model = self._mcv_current_combined_models[(app_label, model_name)]\n donors = model._combiner.donors\n renames = model._combiner.renames.deconstruct()\n operations[app_label].append(CreateCombinedView(model._meta.object_name, donors, renames))\n changes = {}\n for label, op_list in operations.items():\n subclass = type(str(\"Migration\"), (Migration,), {\"operations\": op_list, \"dependencies\": []})\n instance = subclass(\"combinedview\", app_label)\n changes[label] = [instance]\n # TODO: Record model changes among models\n #I don't actually care about autodetecting anything, I just want the arrange_for_graph() method\n autodetector = MigrationAutodetector(\n self._mcv_loader.project_state(),\n ProjectState.from_apps(apps)\n )\n changes = autodetector.arrange_for_graph(changes, self._mcv_loader.graph)\n if changes:\n self.write_migration_files(changes)\n\n def _mcv_historical_combined_models(self):\n # TODO: Currently counts all combined models that EVER existed, but should instead check the state history to\n # prune out overwritten/deleted combined models\n to_return = {}\n for app_label, migration_name in self._mcv_combined_model_nodes.keys():\n migration = self._mcv_combined_model_nodes[(app_label, migration_name)]\n model_name = [ o for o in migration.operations if type(o) == CreateCombinedView ][0].name.lower()\n to_return[(app_label, model_name)] = self._mcv_latest_combined_model_state.models[(app_label, model_name)]\n return to_return\n\n def _mcv_get_combined_model_additions_and_removals(self):\n #returns a 2-tuple of sets of (app_label, model) 2-tuples\n historic_combined_model_identifiers = 
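# The handle() override above leans on unittest.mock.patch as a scoped monkey
# patch; a self-contained sketch of that mechanism (json is just a stand-in):
from unittest.mock import patch
import json
with patch('json.dumps', new=lambda obj: '<patched>'):
    assert json.dumps({}) == '<patched>'   # name swapped inside the block
assert json.dumps({}) == '{}'              # original restored on exit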
set(self._mcv_historical_combined_models().keys())\n current_combined_model_identifiers = set(self._mcv_current_combined_models.keys())\n models_to_add = current_combined_model_identifiers - historic_combined_model_identifiers\n models_to_remove = historic_combined_model_identifiers - current_combined_model_identifiers\n return (models_to_add, models_to_remove)","sub_path":"combine/management/commands/makecombinedviews.py","file_name":"makecombinedviews.py","file_ext":"py","file_size_in_byte":6321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"301631830","text":"# Writing the Academy Award Nominees to our database\n# Data Set: https://www.kaggle.com/unanimad/the-oscar-award?select=the_oscar_award.csv\n\ndef write_awards(dir_path):\n import pandas as pd\n import re\n\n awards = pd.read_csv(dir_path+\"\\\\\"+'data'+\"\\\\\"+'academy_awards.csv')\n\n # Important note to keep in mind, the title of the Best Picture Award has changed several times over the years.\n awards = awards[(awards['category'] == 'BEST PICTURE')|(awards['category'] == 'OUSTANDING PRODUCTION')|(awards['category'] == 'BEST MOTION PICTURE')]\n\n # These are the only two fields that we need from this data\n awards = awards[['film', 'winner']]\n\n # Normalizing our film titles to create a more accurate match column with our other data\n def normalize(x):\n x = str(x)\n x = x.lower()\n x = re.sub(' ','', x)\n x = re.sub('[^A-Za-z0-9]+', '', x)\n return x\n\n awards['match'] = awards['film'].apply(lambda x: normalize(x))\n\n return awards\n","sub_path":"Modules/clean_awards.py","file_name":"clean_awards.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"616828596","text":"# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom kafka import KafkaClient, SimpleConsumer\nimport json\nimport logging\nfrom winchester.config import ConfigManager\nfrom winchester.trigger_manager import TriggerManager\nfrom datetime import datetime\n\nlog = logging.getLogger(__name__)\n\n\nclass EventProcessor():\n \"\"\"\n EventProcessor consumes events from kafka, and adds them to the\n stacktach winchester TriggerManager\n \"\"\"\n\n def __init__(self, kafka_url, group, event_topic, winchester_config): \n self._kafka_url = kafka_url\n self._group = group\n self._topic = event_topic\n self._winchester_config = winchester_config\n self._kafka = KafkaClient(self._kafka_url)\n self._consumer = SimpleConsumer(self._kafka,\n group,\n event_topic,\n auto_commit=True)\n self._consumer.seek(0, 2)\n self._consumer.provide_partition_info()\n self._config = ConfigManager.load_config_file(winchester_config)\n self._trigger_manager = TriggerManager(self._config)\n\n def run(self):\n for message in self._consumer:\n try:\n sub_message = message[1].message\n envelop_str = sub_message.value\n\n envelope = json.loads(envelop_str)\n event = 
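# Usage example for the normalize() helper in clean_awards.py above: lower-case,
# strip spaces, then drop all non-alphanumerics, giving a punctuation-insensitive
# join key between the awards data and other film tables.
import re
def _normalize(x):
    x = re.sub(' ', '', str(x).lower())
    return re.sub('[^A-Za-z0-9]+', '', x)
assert _normalize("The King's Speech") == "thekingsspeech"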
envelope['event']\n\n                if 'timestamp' in event:\n                    event['timestamp'] = datetime.strptime(event['timestamp'], \"%Y-%m-%dT%H:%M:%S.%f+00:00\")\n\n                if 'when' in event:\n                    event['when'] = datetime.strptime(event['when'], \"%Y-%m-%dT%H:%M:%S.%f+00:00\")\n\n                self._trigger_manager.add_event(event)\n\n            except Exception as ex:\n                log.exception(\"Error processing event: %s\", ex)  # exception() needs a message argument","sub_path":"monasca-event/monasca_event/event_processor.py","file_name":"event_processor.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"319626544","text":"import unittest\nfrom ddt import ddt, data, unpack\nfrom Allan.Back.read import OptimizeInsert\nfrom Allan.Back.sql_helper import *\nfrom Allan.Back.query import *\n\n\ndef can_open(file):\n    return OptimizeInsert(file).open_workbook()\n\n\ndef insert_interval(file):\n    return OptimizeInsert(file).set_insert_num()\n\n\ndef row_count(file):\n    return OptimizeInsert(file).title_row\n\n\ndef current_rows(file):\n    return OptimizeInsert(file).num_rows\n\n\ndef date_index(file):\n    return OptimizeInsert(file).date_index\n\n\n@ddt\nclass ReadTests(unittest.TestCase):\n    # New & Old Excel File\n    @data(\"Workbook1.xlsx\", \"Workbook1.xls\")\n    def testOne(self, value):\n        return self.assertTrue(can_open(value))  # assertTrue replaces the deprecated failUnless\n\n    @data(\"Workbook1.xlsx\", \"Workbook1.xls\")\n    def testTwo(self, value):\n        return self.assertEqual(insert_interval(value), 60)\n\n    @data(\"Workbook1.xlsx\", \"Workbook1.xls\")\n    def testThree(self, value):\n        return self.assertEqual(row_count(value),\n                                ['Job #', 'Street #', 'Street Name', 'City',\n                                 'Job Name', 'Heads', 'Date',\n                                 'General Contractor', 'Estimator', 'Designer']\n                                )\n\n    @data(\"Workbook1.xlsx\", \"Workbook1.xls\")\n    def testFour(self, value):\n        return self.assertEqual(current_rows(value), 60)\n\n    @data(\"Workbook1.xlsx\", \"Workbook1.xls\")\n    def testFive(self, value):\n        return self.assertEqual(date_index(value), 6)\n\n    # DNE Excel File\n\n    @data(\"WoahDude.xlsx\", \"WoahBro.xlx\")\n    def testSix(self, value):\n        return self.assertRaises(FileNotFoundError, can_open, value)\n\n    @data(\"WoahDude.xlsx\", \"WoahBro.xlx\")\n    def testSeven(self, value):\n        return self.assertRaises(FileNotFoundError, insert_interval, value)\n\n    @data(\"WoahDude.xlsx\", \"WoahBro.xlx\")\n    def testEight(self, value):\n        return self.assertRaises(FileNotFoundError, current_rows, value)\n\n    @data(\"WoahDude.xlsx\", \"WoahBro.xlx\")\n    def testNine(self, value):\n        return self.assertRaises(FileNotFoundError, date_index, value)\n\n\n@ddt\nclass QueryTests(unittest.TestCase):\n\n    def testOne(self):\n        return self.assertEqual(columns_master, [\n            (0, \"id\", \"INTEGER PRIMARY KEY NOT NULL\"),\n            (1, \"job_number\", \"TEXT NOT NULL DEFAULT (null)\"),\n            (2, \"street_number\", \"TEXT DEFAULT (null)\"),\n            (3, \"street_name\", \"TEXT DEFAULT (null)\"),\n            (4, \"city\", \"TEXT DEFAULT (null)\"),\n            (5, \"job_name\", \"TEXT DEFAULT (null)\"),\n            (6, \"head_number\", \"TEXT DEFAULT (null)\"),\n            (7, \"date\", \"DATETIME\"),\n            (8, \"general_contractor\", \"TEXT DEFAULT(null)\"),\n            (9, \"estimator\", \"TEXT DEFAULT(null)\"),\n            (10, \"designer\", \"TEXT DEFAULT(null)\")\n        ])\n\n    def testTwo(self):\n        return self.assertEqual(get_date_col(), 6)\n\n    def testThree(self):\n        return self.assertEqual(get_table(), 'job_list')\n\n    def testFour(self):\n        return self.assertTrue(os.path.isfile(os.path.join(os.path.dirname(\n            os.path.abspath(__file__)), 'allan_jobs_db.sqlite')))\n\n    def testFive(self):\n        return 
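# How ddt expands the decorated tests above (a sketch): each @data value
# becomes its own generated test method, so the .xlsx and .xls cases pass or
# fail independently of one another.
from ddt import ddt, data
import unittest
@ddt
class Demo(unittest.TestCase):
    @data(1, 2, 3)
    def test_positive(self, value):     # runs as three separate tests
        self.assertGreater(value, 0)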
self.assertFalse(os.path.isfile(os.path.join(os.path.dirname(\n os.path.abspath(__file__)), 'allan_jobs.sqlite')))\n\n @data(\n # job_number general with empty values\n (\n \"SELECT job_number, street_number, street_name, city, \"\n \"job_name, head_number, date, general_contractor, estimator, \"\n \"designer FROM job_list WHERE job_number LIKE ? \", ('%200%', ),\n (1, [('200', '457828', '3Ki!3No!', '9Lg.8Yf\"', '1Yc+7Dn!',\n '67', '1997-04-27', '', '', '4Fo(6Ar!')])\n ),\n # job_number with non numeric chars\n (\n \"SELECT job_number, street_number, street_name, city, \"\n \"job_name,head_number, date, general_contractor, estimator, \"\n \"designer FROM job_list WHERE job_number LIKE ? \",\n ('%6219T%', ), (1, [('6219T', '778342', '4Eb\"9Ld.', '8Iy.0Br!',\n '4Nt+8Uq%', '81', '1999-06-22',\n '7Nn!0Nz,', '1He*0Og-', '4Lq+4Mt#')])\n ),\n # job_number DNE\n (\n \"SELECT job_number, street_number, street_name, city, \"\n \"job_name,head_number, date, general_contractor, estimator, \"\n \"designer FROM job_list WHERE job_number LIKE ? \", ('1', ),\n (0, [])\n ),\n # street_number < 20000\n (\n \"SELECT job_number, street_number, street_name, city, \"\n \"job_name,head_number, date, general_contractor, estimator, \"\n \"designer FROM job_list WHERE street_number LIKE ? \",\n ('%994%', ), (1, [('4739', '994', '8Uf\"7Dd)', '6Iv,4Kg\"',\n '1Zs#3Oe.', '628', '1914-01-26', '9Hy#9Zh/',\n '0Lt(0Xk.', '8Mz*3Mb*')])\n ),\n # street_number > 20000\n (\n \"SELECT job_number, street_number, street_name, city, \"\n \"job_name,head_number, date, general_contractor, estimator, \"\n \"designer FROM job_list WHERE street_number LIKE ? \",\n ('%863260%', ), (1, [('9062', '863260', '6Ph$5Tm-', '2El#7Vu&',\n '4Tb\"7Lr(', '78', '1912-06-09',\n '4Ss+1Rs\"', '1Pb#8Ea,', '1Ln\"4Dw(')])\n ),\n # street_number partial\n (\n \"SELECT job_number, street_number, street_name, city, \"\n \"job_name,head_number, date, general_contractor, estimator, \"\n \"designer FROM job_list WHERE street_number LIKE ? \",\n ('%420A%', ), (1, [('5148', '420A & 420B', '8Bu)9Yh(',\n '1Uc#4Sg$', '1Kl(8Tp,', '411',\n '1924-01-04', '0Ko+7Fs%', '7Hi$6Ug)',\n '7Vq$8Op%')])\n ),\n # street_number with non numeric chars\n (\n \"SELECT job_number, street_number, street_name, city, \"\n \"job_name,head_number, date, general_contractor, estimator, \"\n \"designer FROM job_list WHERE street_number LIKE ? \",\n ('%9088B-9022C%', ), (1, [('8139', '9088B-9022C', '9Gq,6Zi.',\n \"0Xg'0Zz'\", \"5Qq(8Td'\", '9',\n '1978-03-07', '', '', '')])\n ),\n # street_number empty\n (\n \"SELECT job_number, street_number, street_name, city, \"\n \"job_name,head_number, date, general_contractor, estimator, \"\n \"designer FROM job_list WHERE job_number LIKE ? \",\n ('%6265%', ), (1, [('6265', '', '', '', '4Nr.2Er%', 'REPAIR',\n '1959-08-06', '0Pc+8Tw*', '6Tz#6Ez!',\n '9Zo/3Tw+')])\n ),\n # street_number DNE\n (\n \"SELECT job_number, street_number, street_name, city, \"\n \"job_name,head_number, date, general_contractor, estimator, \"\n \"designer FROM job_list WHERE job_number LIKE ? \",\n ('%9000%', ), (0, [])\n ),\n # street_name general\n (\n \"SELECT job_number, street_number, street_name, city, \"\n \"job_name,head_number, date, general_contractor, estimator, \"\n \"designer FROM job_list WHERE street_name LIKE ? 
\",\n ('%9Uf\"9Uv(%', ), (1, [('8832', '30103', '9Uf\"9Uv(',\n '4Cr&5Bt/', '4Hp!8Jv*', '42AB/92CD',\n '1920-06-26', '9Mf#9Fr*', '9Nu+8Ur*',\n '7Cx!7Pj&')])\n ),\n # street_name DNE\n (\n \"SELECT job_number, street_number, street_name, city, \"\n \"job_name,head_number, date, general_contractor, estimator, \"\n \"designer FROM job_list WHERE street_name LIKE ? \",\n ('%Main Street%', ), (0, [])\n ),\n # city general\n (\n \"SELECT job_number, street_number, street_name, city, \"\n \"job_name,head_number, date, general_contractor, estimator, \"\n \"designer FROM job_list WHERE city LIKE ? \",\n ('%0Xi$4Gk,%', ), (1, [('4886', '468', \"1Fz'1Rp$\", '0Xi$4Gk,',\n '4Xp\"9Gv+', '327', '1945-04-25',\n '8Pv\"0Uw\"', \"8Kj'9Hi/\", '6Qg#2Kg.')])\n ),\n # city DNE\n (\n \"SELECT job_number, street_number, street_name, city, \"\n \"job_name,head_number, date, general_contractor, estimator, \"\n \"designer FROM job_list WHERE city LIKE ? \",\n ('%San Francisco%', ), (0, [])\n ),\n # job_name general\n (\n \"SELECT job_number, street_number, street_name, city, \"\n \"job_name,head_number, date, general_contractor, estimator, \"\n \"designer FROM job_list WHERE job_name LIKE ? \",\n ('%5Hd-9Em*%', ), (1, [('66463', '861542', '6Jy/2Sl#',\n '3Zk-3Vg/', '5Hd-9Em*', '61',\n '1978-01-29', '4Qp*4Nk(', '0Fp#8Ma#',\n '8Pq#2Iz)')])\n ),\n # job_name DNE\n (\n \"SELECT job_number, street_number, street_name, city, \"\n \"job_name,head_number, date, general_contractor, estimator, \"\n \"designer FROM job_list WHERE job_name LIKE ? \",\n ('%Jobby Job%', ), (0, [])\n ),\n # head_number general\n (\n \"SELECT job_number, street_number, street_name, city, \"\n \"job_name,head_number, date, general_contractor, estimator, \"\n \"designer FROM job_list WHERE head_number LIKE ? \",\n ('%934%', ), (1, [('7806', '982', '7Ca*9Yd)', '4Su&0Rd.',\n '0Hy*1Ac.', '934', '', '0Oc,9Vt&',\n '6Qo/7Ph+', '8As%9Jk*')])\n ),\n # head_number with non numeric chars\n (\n \"SELECT job_number, street_number, street_name, city, \"\n \"job_name,head_number, date, general_contractor, estimator, \"\n \"designer FROM job_list WHERE head_number LIKE ? \",\n ('%3/8GU%', ), (1, [('9220', '63', '7Xl\\\\\\'9Kc\"', '2Mh,6Oe-',\n '7Vh)3Ji+', '3/8GU', '1982-01-17',\n '1Qp$1Ok$', '4Dv#3Lj+', '4Ok\"6Zx!')])\n ),\n # head_number DNE\n (\n \"SELECT job_number, street_number, street_name, city, \"\n \"job_name,head_number, date, general_contractor, estimator, \"\n \"designer FROM job_list WHERE head_number LIKE ? \",\n ('%36-24-99%', ), (0, [])\n ),\n # date general\n (\n \"SELECT job_number, street_number, street_name, city, \"\n \"job_name,head_number, date, general_contractor, estimator, \"\n \"designer FROM job_list WHERE date LIKE ? \",\n ('%2007-08-29%', ), (1, [('85792', '621392', '0Br)3Kz%',\n '6Cx#6Dz-', '5Ql%3Mj!', '93',\n '2007-08-29', '5Fg&6Qh,', '',\n '2Ho.2Om\"')])\n ),\n # date empty\n (\n \"SELECT job_number, street_number, street_name, city, \"\n \"job_name,head_number, date, general_contractor, estimator, \"\n \"designer FROM job_list WHERE job_number LIKE ? \",\n ('%7806%', ), (1, [('7806', '982', '7Ca*9Yd)', '4Su&0Rd.',\n '0Hy*1Ac.', '934', '', '0Oc,9Vt&',\n '6Qo/7Ph+', '8As%9Jk*')])\n ),\n # date DNE\n (\n \"SELECT job_number, street_number, street_name, city, \"\n \"job_name,head_number, date, general_contractor, estimator, \"\n \"designer FROM job_list WHERE date LIKE ? 
\",\n ('%2009-12-09%', ), (0, [])\n ),\n # general_contractor general\n (\n \"SELECT job_number, street_number, street_name, city, \"\n \"job_name,head_number, date, general_contractor, estimator, \"\n \"designer FROM job_list WHERE general_contractor LIKE ? \",\n ('%8Wk(2Nf(%', ), (1, [('705', '123', '1Ts%1Jm+', '1Gw#1Xd,',\n '2Hd#6Vw)', '0', '', '8Wk(2Nf(',\n '9Tl+6Bx%', '5Pt#9Tl,')])\n ),\n # general_contractor DNE\n (\n \"SELECT job_number, street_number, street_name, city, \"\n \"job_name,head_number, date, general_contractor, estimator, \"\n \"designer FROM job_list WHERE general_contractor LIKE ? \",\n ('%Mister%', ), (0, [])\n ),\n # estimator general\n (\n \"SELECT job_number, street_number, street_name, city, \"\n \"job_name,head_number, date, general_contractor, estimator, \"\n \"designer FROM job_list WHERE estimator LIKE ? \",\n ('%5Xj$9Xw!%', ), (1, [('3222', '128', \"7Rk$8Gg'\", '4Ov%8Uu!',\n '4Qq\"6Hg&', '644', '1971-09-05',\n '3At!3Ka(', '5Xj$9Xw!', '4Vc+0Xm.')])\n ),\n # estimator DNE\n (\n \"SELECT job_number, street_number, street_name, city, \"\n \"job_name,head_number, date, general_contractor, estimator, \"\n \"designer FROM job_list WHERE estimator LIKE ? \",\n ('%Lady%', ), (0, [])\n ),\n # designer general\n (\n \"SELECT job_number, street_number, street_name, city, \"\n \"job_name,head_number, date, general_contractor, estimator, \"\n \"designer FROM job_list WHERE designer LIKE ? \",\n ('%6Kq,9Pw)%', ), (1, [('101', '317620', '4Dq)3Ni!',\n \"9Iq.1Gv'\", '8So%9Zy)', '82',\n '1966-06-27', \"6Zo'9Qc/\", \"2Nm'2Yr+\",\n '6Kq,9Pw)')])\n ),\n # designer DNE\n (\n \"SELECT job_number, street_number, street_name, city, \"\n \"job_name,head_number, date, general_contractor, estimator, \"\n \"designer FROM job_list WHERE designer LIKE ? 
\",\n ('%cow%', ), (0, [])\n )\n )\n @unpack\n def testSix(self, query, terms, result):\n return self.assertEqual(query_db(query, terms), result)\n\n\ndef main():\n unittest.main(verbosity=2)\n\nif __name__ == '__main__':\n main()\n","sub_path":"Allan/Tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":15067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"344814313","text":"import os\nimport io\nimport cssmin\nimport jsmin\nimport scss\n\nfrom django.conf import settings\nfrom django.core.exceptions import MiddlewareNotUsed\n\nFILE_CACHE = {}\n\nPROCESSORS = {\n '.css': (cssmin.cssmin, ),\n '.scss': (scss.Scss().compile, cssmin.cssmin, ),\n '.sass': (scss.Scss().compile, cssmin.cssmin, ),\n '.js': (jsmin.jsmin, )\n}\n\n\ndef process_file(filename, content):\n \"\"\"Process filename with any necessary processors and return the result.\"\"\"\n ext = os.path.splitext(filename)[-1]\n processors = PROCESSORS.get(ext, ())\n\n if ext in ('.sass', '.scss'):\n scss.config.STATIC_URL = settings.STATIC_URL\n scss.config.STATIC_ROOT = os.path.dirname(os.path.dirname(filename))\n scss.config.LOAD_PATHS = [os.path.dirname(filename)]\n\n for func in processors:\n content = func(content)\n\n return content + '\\n'\n\n\ndef get_file_content(filename):\n \"\"\"\n Get the file contents from the cache for the given filename.\n If it does not exist in the cache, then get the contents from disk and cache it.\n \"\"\"\n modified = os.path.getmtime(filename)\n cached = FILE_CACHE.get(filename, None)\n\n # If the filename is in the cache and the file has not changed then return cached content.\n # Also only return if the version on disk has not changed since the cached version.\n if cached:\n content = cached['content']\n if modified <= cached['date']:\n return content, False\n\n # But if it's not in the cache, read the file and add it to the cache.\n with open(filename, 'r') as handle:\n content = process_file(filename, handle.read())\n FILE_CACHE[filename] = {'date': modified, 'content': content}\n return content, True\n\n\nclass CompileAssetsMiddleware:\n \"\"\"\n A middleware class to compile and minify SCSS files and to minify JS files.\n Depends on COMPILED_ASSETS being set in project settings.\n This is designed to run only in development, so that when you push your\n project to production, the compiled assets can be served like normal.\n \"\"\"\n\n def __init__(self):\n if not settings.DEBUG:\n message = '{} only runes in DEBUG mode'.format(self.__class__)\n raise MiddlewareNotUsed(message)\n\n def process_request(self, request):\n self.compile()\n\n def compile(self):\n \"\"\"Process all the files in the given filedict using the necessary processors.\"\"\"\n filedict = getattr(settings, 'COMPILED_ASSETS', {})\n\n # The key for filedict is the output file, and the value is a list\n # of files that should contribute to that output file.\n for mainfile, filelist in filedict.items():\n rebuild = []\n output = io.StringIO()\n\n # Loop through each file in the set and keep track of whether any of them have changed.\n for filename in filelist:\n content, changed = get_file_content(filename)\n output.write(content)\n rebuild.append(changed)\n\n # Only rebuild the main file if some files in the set have changed.\n if any(rebuild):\n output = output.getvalue()\n with open(mainfile, 'w') as handle:\n 
handle.write(output)\n","sub_path":"project/assets.py","file_name":"assets.py","file_ext":"py","file_size_in_byte":3256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"602666129","text":"import requests\nfrom youtube_auth import api_key\n\nclass TopYoutube:\n\n\tdef discover(self):\n\n\t\tself.media = []\n\n\t\tpayload={\n\t\t\t\"part\" : \"snippet\",\n\t\t\t\"chart\" : \"mostPopular\",\n\t\t\t\"key\" : api_key,\n\t\t\t\"maxResults\" : 20\n\t\t}\n\n\t\tr = requests.get('https://www.googleapis.com/youtube/v3/videos', params=payload).json()\n\n\t\tfor video in r['items']:\n\t\t\tself.media.append({'name': video[\"snippet\"][\"title\"], 'link': \"http://www.youtube.com/watch?v=\" + video[\"id\"], 'description': video['snippet']['localized']['description']})\n\t\t\n\t\treturn self.media\n","sub_path":"Processing/top_youtube.py","file_name":"top_youtube.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"214571737","text":"\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nclass Actor(nn.Module):\n    \"\"\"\n    Policy Model\n    \"\"\"\n\n    def __init__(self, state_size, action_size, seed):\n        \"\"\"\n        Initialize and build the policy network.\n        Args:\n            state_size (int): The Dimension of states\n            action_size (int): The Dimension of Action Space\n            seed (int): seed\n        \"\"\"\n        super(Actor, self).__init__()\n        self.seed = torch.manual_seed(seed)\n        self.bn = nn.BatchNorm1d(state_size)\n        self.fc1 = nn.Linear(state_size, 32)\n        self.fc2 = nn.Linear(32, 64)\n        self.fc3 = nn.Linear(64, 64)\n        self.fc4 = nn.Linear(64, action_size)\n\n    def forward(self, state):\n        \"\"\"\n        Forward pass of the policy network.\n        Args:\n            state: tensor of states\n\n        Returns:\n            actions\n        \"\"\"\n        x = state\n        x = self.bn(x)\n        x = F.relu(self.fc1(x))\n        x = F.relu(self.fc2(x))\n        x = F.relu(self.fc3(x))\n        return torch.tanh(self.fc4(x))\n\n\n\nclass Critic(nn.Module):\n    \"\"\"\n    Critic Model\n    \"\"\"\n\n    def __init__(self, state_size, action_size, seed):\n        \"\"\"\n        Initialize and build the critic network.\n        Args:\n            state_size (int): The Dimension of states\n            action_size (int): The Dimension of Action Space\n            seed (int): seed\n        \"\"\"\n        super(Critic, self).__init__()\n        self.seed = torch.manual_seed(seed)\n        self.fcState = nn.Linear(state_size, 32)\n        self.fcAction = nn.Linear(action_size, 16)\n        self.fc2 = nn.Linear(32+16, 128)\n        self.fc3 = nn.Linear(128, 32)\n        self.fc4 = nn.Linear(32, 1)\n\n    def forward(self, state, action):\n        \"\"\"\n        Forward pass of the critic network.\n        Args:\n            state: tensor of states\n            action: tensor of actions\n\n        Returns:\n            qValue: Q(state,action)\n        \"\"\"\n        xState = F.relu(self.fcState(state))\n        xAction = F.relu(self.fcAction(action))\n        x = torch.cat((xState, xAction), 1)\n        x = F.relu(self.fc2(x))\n        x = F.relu(self.fc3(x))\n        return self.fc4(x)\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"604671550","text":"import os\nimport pickle\nimport numpy as np\nfrom sklearn.neighbors import KNeighborsClassifier\n\nX_all = np.array([[0, 0, 0, 0 ,0 ,0]])\ny_all = np.array([])\n# list the pickle log files\ndir_path = './log/'\nfiles = os.listdir(dir_path)\n\nfor file in files:\n    file_path = dir_path + file\n    with open(file_path, 'rb') as f:\n        data = pickle.load(f)\n    # print(data)\n    scene_info = data['ml_2P']['scene_info']\n    
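# Per-frame features gathered below: ball x/y position, frame-to-frame ball velocity,\n    # a 0-3 direction code derived from the velocity signs, and the 2P platform x position;\n    # the recorded commands become the training labels.\n    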
command = data['ml_2P']['command']\n\n    Ball_x = []\n    Ball_y = []\n    Ball_speed_x = []\n    Ball_speed_y = []\n    Direction = []\n    Platform = []\n    Command = []\n\n    for i, s in enumerate(scene_info[1:-2]):\n        Ball_x.append(s['ball'][0])\n        Ball_y.append(s['ball'][1])\n        Platform.append(s['platform_2P'][0])\n        Ball_speed_x.append(scene_info[i+2][\"ball\"][0] - scene_info[i+1][\"ball\"][0])\n        Ball_speed_y.append(scene_info[i+2][\"ball\"][1] - scene_info[i+1][\"ball\"][1])\n        if Ball_speed_x[-1] > 0:\n            if Ball_speed_y[-1] > 0:\n                # moving right and down\n                Direction.append(0)\n            else:\n                # moving right and up\n                Direction.append(1)\n        else:\n            if Ball_speed_y[-1] > 0:\n                # moving left and down\n                Direction.append(2)\n            else:\n                # moving left and up\n                Direction.append(3)\n    \n    for c in command[1:-2]:\n        if c == \"NONE\":\n            Command.append(0)\n        elif c == \"MOVE_LEFT\":\n            Command.append(-1)\n        elif c == \"MOVE_RIGHT\":\n            Command.append(1)\n    numpy_data = np.array([Ball_x, Ball_y, Ball_speed_x, Ball_speed_y, Direction, Platform])\n    X = np.transpose(numpy_data) \n    y = command\n    # NOTE: the block below recomputes the same features with vectorized numpy and\n    # overwrites the loop results above; only the vectorized X/y are kept.\n    scene_info = data['ml_2P']['scene_info']\n    command = data['ml_2P']['command']\n\n    k = range(1, len(scene_info)-1)\n\n    ball_x = np.array([scene_info[i]['ball'][0] for i in k])\n    ball_y = np.array([scene_info[i]['ball'][1] for i in k])\n    ball_speed_x = np.array([scene_info[i+1]['ball'][0] - scene_info[i]['ball'][0] for i in k])\n    ball_speed_y = np.array([scene_info[i+1]['ball'][1] - scene_info[i]['ball'][1] for i in k])\n    direction = np.where(np.vstack((ball_speed_x, ball_speed_y)) > 0, [[1],[0]], [[2],[3]]).sum(axis=0) # x y: ++1, +-4, -+2, --3\n    platform = np.array([scene_info[i]['platform_2P'][0] for i in k])\n    target = np.where(np.array(command) == 'NONE', 0,\n                      np.where(np.array(command) == 'MOVE_LEFT', -1, 1))[1:-1] # [0] SERVE_TO_RIGHT, [1897] None\n    X = np.hstack((ball_x.reshape(-1, 1),\n                   ball_y.reshape(-1, 1),\n                   ball_speed_x.reshape(-1, 1),\n                   ball_speed_y.reshape(-1, 1),\n                   direction.reshape(-1, 1),\n                   platform.reshape(-1, 1)))\n    y = target\n    \n    X_all = np.concatenate((X_all, X))\n    y_all = np.append(y_all, y)\n\nX_all = np.delete(X_all, 0, axis=0)\n\nmodel = KNeighborsClassifier(n_neighbors=3)\nprint(model.fit(X_all, y_all))\nprint(model.score(X_all, y_all))\nwith open('my_model.pickle2', 'wb') as f:\n    pickle.dump(model, f)","sub_path":"games/pingpong/ml/train_2P.py","file_name":"train_2P.py","file_ext":"py","file_size_in_byte":3037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"379010298","text":"from django.shortcuts import render\n\nfrom myapp.settings import BASE_DIR\nfrom . 
import models\nfrom django.conf import settings\nfrom os import remove,chdir\nfrom django.core.files.storage import FileSystemStorage\ncurl=settings.CURRENT_URL\nmurl=settings.MEDIA_URL\ndef myadminhome(request):\n return render(request,\"myadminhome.html\",{'curl':curl})\n\ndef addcat(request):\n query1=\"select * from catagory\"\n models.cursor.execute(query1)\n clist=models.cursor.fetchall()\n if request.method==\"GET\":\n return render(request,\"addcat.html\",{'curl':curl,\"output\":'',\"clist\":clist})\n else:\n catnm=request.POST.get('catnm')\n caticon=request.FILES['caticon']\n fs=FileSystemStorage()\n filename=fs.save(caticon.name,caticon)\n query=\"insert into catagory values(NULL,'%s','%s')\" %(catnm,filename)\n models.cursor.execute(query)\n models.db.commit()\n query2 = \"select * from catagory\"\n models.cursor.execute(query2)\n clist = models.cursor.fetchall()\n return render(request,\"addcat.html\",{\"curl\":curl,\"output\":\"Catagory added successfully ....\",\"clist\":clist})\n\n\ndef addsubcat(request):\n query1 = \"select * from catagory\"\n models.cursor.execute(query1)\n clist = models.cursor.fetchall()\n if request.method=='GET':\n return render(request,\"addsubcat.html\",{'curl':curl,'clist':clist})\n else:\n subcatnm=request.POST.get('subcatnm')\n catnm=request.POST.get('catnm')\n subcaticon=request.FILES['subcaticon']\n fs=FileSystemStorage()\n filename=fs.save(subcaticon.name,subcaticon)\n query=\"insert into subcatagory values(NULL,'%s','%s','%s')\" %(catnm,subcatnm,filename)\n models.cursor.execute(query)\n models.db.commit()\n return render(request,\"addsubcat.html\",{\"curl\":curl,\"output\":\"Sub Catagory added successfully ....\",\"clist\":clist})\n\n\n\ndef managecat(request):\n query1=\"select * from catagory\"\n models.cursor.execute(query1)\n clist=models.cursor.fetchall()\n if request.method=='GET':\n return render(request,\"managecat.html\",{'curl':curl,\"output\":'','clist':clist})\n else:\n catnm=request.POST.get('catnm')\n query4=\"select * from catagory WHERE catnm = '%s' \"%(catnm)\n models.cursor.execute(query4)\n cat=models.cursor.fetchall()\n caticon=cat[0][2]\n print(caticon)\n print(type(caticon))\n query2=\"DELETE FROM catagory WHERE catnm = '%s' \"%(catnm)\n models.cursor.execute(query2)\n models.db.commit()\n try:\n chdir(\"media\")\n remove(caticon)\n chdir(\"..\")\n print(\"File Deleted\")\n except:\n print(\"File is not placed on the location\")\n query3 = \"select * from catagory\"\n models.cursor.execute(query3)\n clist = models.cursor.fetchall()\n return render(request,\"managecat.html\",{'curl':curl,\"output\":catnm+' Deleted....','clist':clist})\n\ndef managesubcat(request):\n query1=\"select * from catagory\"\n models.cursor.execute(query1)\n clist=models.cursor.fetchall()\n query2=\"select * from subcatagory\"\n models.cursor.execute(query2)\n subcatlist=models.cursor.fetchall()\n if request.method=='GET':\n return render(request,\"managesubcat.html\",{'curl':curl,\"output\":'','clist':clist,'subcatlist':subcatlist})\n else:\n subcatnm=request.POST.get('subcatnm')\n query3=\"select * from subcatagory WHERE subcatnm = '%s' \"%(subcatnm)\n models.cursor.execute(query3)\n subcat=models.cursor.fetchall()\n subcaticon=subcat[0][3]\n print(subcaticon)\n print(type(subcaticon))\n query4=\"DELETE FROM subcatagory WHERE subcatnm = '%s' \"%(subcatnm)\n models.cursor.execute(query4)\n models.db.commit()\n try:\n chdir(\"media\")\n remove(subcaticon)\n chdir(\"..\")\n print(\"File Deleted\")\n except:\n print(\"File is not placed on the 
location\")\n return render(request,\"managesubcat.html\",{'curl':curl,\"output\":subcatnm+' Deleted....','clist':clist,'subcatlist':subcatlist})\n\n","sub_path":"myapp/myadmin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"444931132","text":"import requests\nimport json\n\ndef main():\n\n url = \"https://slack.com/api/conversations.history\"\n token = \"xxx\"\n channel_id = \"C0100M822JY\"\n\n payload = {\n \"token\": token,\n \"channel\": channel_id\n }\n response = requests.get(url, params=payload)\n json_data = response.json()\n messages = json_data[\"messages\"]\n for i in messages[:10]:\n print(i[\"text\"])\n\nmain()\n","sub_path":"exec_from_php.py","file_name":"exec_from_php.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"214267341","text":"from copy import deepcopy\nfrom unittest import TestCase\n\nfrom lxml import html\nimport requests\n\n\n__author__ = 'icoz'\n\n\ndef get_pages(doc):\n pages_data = doc.xpath(\"//ul[@id='nav-pages']//a[last()]\")\n # print(pages_data)\n if len(pages_data) > 0:\n pg_text = str(pages_data[-1].attrib['href']).split('/')[-2]\n # print('pages=',pg_text[4:])\n pages = int(pg_text[4:])\n else:\n pages = 1\n return pages\n\n\nclass HabraUser(object):\n def __init__(self, username, need_favorites=False, need_user_posts=False):\n self._username = username\n self._user = dict()\n self._user_karma = dict()\n self._user_profile = dict()\n self._user_activity = dict()\n\n req_data = requests.get(self._genUrlForUsername(username)).text\n self._doc = html.document_fromstring(req_data)\n self._parseUserpage()\n self._user_favorites = dict()\n self._user_favorites_loaded = need_favorites\n if need_favorites:\n self._user_favorites = self._getFavorites()\n self._user_posts = dict()\n self._user_posts_loaded = need_user_posts\n if need_user_posts:\n self._user_posts = self._getUserPosts()\n\n def favorites(self):\n \"\"\"\n Returns dict by name of topic_id\n\n :param username:\n string of username, ex. 
'some_user'\n        :return:\n            dict(name) = id\n        \"\"\"\n        if not self._user_favorites_loaded:\n            self._user_favorites = self._getFavorites()\n            self._user_favorites_loaded = True\n        return deepcopy(self._user_favorites)\n\n    def user_posts(self):\n        if not self._user_posts_loaded:\n            self._user_posts = self._getUserPosts()\n            self._user_posts_loaded = True\n        return deepcopy(self._user_posts)\n\n    def profile(self):\n        return deepcopy(self._user_profile)\n\n    def activity(self):\n        return deepcopy(self._user_activity)\n\n    def karma(self):\n        return deepcopy(self._user_karma)\n\n    def _genFavoritesUrlByUser(self, username):\n        '''\n        Generates favorites URL using username\n\n        :param username:\n            string with username\n        :return:\n            string with URL\n        '''\n        return self._genUrlForUsername(username) + 'favorites/'\n        # 'http://habrahabr.ru/users/{}/favorites'.format(username)\n\n    def _genUrlForUsername(self, username):\n        '''\n        Generates user-page URL using username\n\n        :param username:\n            string with username\n        :return:\n            string with URL\n        '''\n        return 'http://habrahabr.ru/users/{}/'.format(username)\n\n\n    def _getUserCompanyList(self):\n        out = []\n        cmpns = self._doc.xpath(\"//div[@class='user_profile']/dl[@id='favorite_companies_list']//a\")\n        for company in cmpns:\n            out.append((company.text, company.attrib['href']))\n        return out\n\n\n    def _getUserHubList(self):\n        out = []\n        hubs = self._doc.xpath(\"//div[@class='user_profile']/dl[@class='hubs_list']//a[@class='cross']\")\n        for hub in hubs:\n            out.append((hub.text, hub.attrib['href']))\n        return out\n\n\n    def _parseUserpage(self):\n\n        p_tags = self._doc.xpath(\"//div[@class='user_profile']//ul[@id='people-tags']//a/span\")\n        registration_date = self._doc.xpath(\"//div[@class='user_profile']//dd[@class='grey']\")[0].text\n\n        self._user['username'] = self._doc.xpath(\"//div[@class='user_header']/h2/a\").pop().text\n        self._user_karma['karma'] = float(\n            self._doc.xpath(\"//div[@class='karma']//div[@class='num']\").pop().text.replace(',', '.'))\n        self._user_karma['karma_vote'] = int(\n            self._doc.xpath(\"//div[@class='karma']/div[@class='votes']\").pop().text.split(' ')[0])\n        self._user_karma['rating'] = float(\n            self._doc.xpath(\"//div[@class='rating']/div[@class='num']\").pop().text.replace(',', '.'))\n        self._user_profile['fullname'] = self._doc.xpath(\n            \"//div[@class='user_profile']/div[@class='fullname']\").pop().text.strip()\n        self._user_karma['rating_place'] = int(\n            self._doc.xpath(\"//div[@class='user_profile']/div[@class='rating-place']\").pop().text.split('-')[0])\n        if len(self._doc.xpath(\"//div[@class='user_profile']//dd[@class='bday']\")):\n            self._user_profile['birthday'] = self._doc.xpath(\"//div[@class='user_profile']//dd[@class='bday']\")[0].text\n        if len(self._doc.xpath(\"//div[@class='user_profile']//dd/a[@class='country-name']\")):\n            self._user_profile['country'] = self._doc.xpath(\"//div[@class='user_profile']//dd/a[@class='country-name']\")[0].text\n        if len(self._doc.xpath(\"//div[@class='user_profile']//dd/a[@class='region']\")):\n            self._user_profile['region'] = self._doc.xpath(\"//div[@class='user_profile']//dd/a[@class='region']\")[0].text\n        if len(self._doc.xpath(\"//div[@class='user_profile']//dd/a[@class='city']\")):\n            self._user_profile['city'] = self._doc.xpath(\"//div[@class='user_profile']//dd/a[@class='city']\")[0].text\n        self._user_profile['people_tags'] = [i for i in map(lambda x: x.text, p_tags)]\n        self._user_profile['registraion_date'] = registration_date[:registration_date.index('\\r\\n')]\n\n        self._user_activity['followers_count'] = int(\n            
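# each stats counter renders as 'N word', so the xpath text is split on ' ' and the leading number kept\n            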
self._doc.xpath(\"//div[@class='stats']/div[@id='followers_count']/a\").pop().text.split(' ')[0])\n self._user_activity['posts_count'] = int(\n self._doc.xpath(\"//div[@class='stats']/div[@class='item posts_count']/a\").pop().text.split(' ')[0])\n self._user_activity['comments_count'] = int(\n self._doc.xpath(\"//div[@class='stats']/div[@class='item comments_count']/a\").pop().text.split(' ')[0])\n\n self._user['company_list'] = self._getUserCompanyList()\n self._user['hubs_list'] = self._getUserHubList()\n self._user['profile'] = self._user_profile\n self._user['activity'] = self._user_activity\n self._user['karma'] = self._user_karma\n\n\n def _getFavorites(self):\n \"\"\"\n Returns dict by name of topic_id\n\n :param username:\n string of username, ex. 'some_user'\n :return:\n dict(name) = id\n \"\"\"\n url = self._genFavoritesUrlByUser(self._username)\n doc = html.document_fromstring(requests.get(url).text)\n out = dict()\n pages = get_pages(doc)\n favs = doc.xpath(\"//div[@class='user_favorites']//a[@class='post_title']\")\n for f in favs:\n # out[f.text] = str(f.attrib['href']).split('/')[-2]\n # topic_id =\n out[f.text] = str(f.attrib['href']).split('/')[-2]\n for p in range(2, pages):\n url = 'http://habrahabr.ru/users/{0}/favorites/page{1}/'.format(self._username, p)\n # if show_progress:\n # print('parsing page{0}... url={1}'.format(p, url))\n doc = html.document_fromstring(requests.get(url).text)\n favs = doc.xpath(\"//div[@class='user_favorites']//a[@class='post_title']\")\n for f in favs:\n # out[f.text] = f.attrib['href'][-7:-1]\n out[f.text] = str(f.attrib['href']).split('/')[-2]\n return out\n\n def _getUserPosts(self):\n url = self._genUrlForUsername(self._username) + 'topics/'\n req = requests.get(url)\n if req.status_code != 200:\n raise IOError('doc not found. URL = {}'.format(url))\n doc = html.document_fromstring(req.text)\n out = dict()\n pages = get_pages(doc)\n posts = doc.xpath(\"//div[@class='posts_list']//a[@class='post_title']\")\n for f in posts:\n # print(f.text)\n out[f.text] = str(f.attrib['href']).split('/')[-2]\n # out[f.text] = f.attrib['href'][-7:-1]\n for p in range(2, pages):\n url = self._genUrlForUsername(self._username) + 'topics/page{0}/'.format(p)\n req = requests.get(url)\n if req.status_code != 200:\n raise IOError('doc not found. 
URL = {}'.format(url))\n doc = html.document_fromstring(req.text)\n posts = doc.xpath(\"//div[@class='posts_list']//a[@class='post_title']\")\n for f in posts:\n out[f.text] = str(f.attrib['href']).split('/')[-2]\n return out\n\n\nimport pprint\n\n\nclass Test_HabraUser(TestCase):\n def setUp(self):\n self.hu = HabraUser('icoz')\n pass\n\n def test_parseUserpage(self):\n pp = pprint.PrettyPrinter(indent=4)\n pp.pprint(self.hu.activity())\n pp.pprint(self.hu.profile())\n pp.pprint(self.hu.karma())\n\n def test_favs(self):\n pp = pprint.PrettyPrinter(indent=4)\n # pp.pprint(self.hu.favorites())\n\n def test_user_posts(self):\n hu = HabraUser('Zelenyikot')\n pp = pprint.PrettyPrinter(indent=4)\n pp.pprint('userposts=')\n pp.pprint(hu.user_posts())\n # out = getFavForUsername('icoz')\n # pp = pprint.PrettyPrinter(indent=4)\n # pp.pprint(out)\n\n","sub_path":"habr/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":9035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"57312037","text":"\n\nfrom pdf2image import convert_from_path\nimport tempfile\nimport PIL.Image as Image\nimport pytesseract\nimport cv2\nimport object_detection.export_inference_graph\nfrom stanfordcorenlp import StanfordCoreNLP\nimport re\n\nocr_paths=[]\n\ndef main(filename, outputDir):\n print('filename=', filename)\n print('outputDir=', outputDir)\n with tempfile.TemporaryDirectory() as path:\n images = convert_from_path(filename)\n for index, img in enumerate(images):\n if index >2:\n break\n image_path='%s/page_%s.png' % (outputDir, index)\n\n ocr_paths.append(image_path)\n img.save(image_path)\n\ndef get_abs(text):\n abs_num=text.lower().find(\"abstract\")\n if abs_num!=-1:\n keywords_num=text.lower().find(\"keywords\")\n if keywords_num!=-1:\n if keywords_num 500:\n num = section.rfind(\".\")\n if num > 500:\n return abs_clear(section[:num + 1])\n else:\n return abs_clear(text[abs_num+8:keywords_num])\n else:\n # print(text)\n abs=\"\"\n for section in get_sections(text[abs_num+8:]):\n if section.__len__()>500:\n abs=section\n break\n else:\n abs+=section+\"\\n\"\n if abs.__len__()>500:\n break\n return abs_clear(abs)\n else:\n for section in get_sections(text):\n if section.__len__()>500:\n num=section.rfind(\".\")\n if num >500:\n return abs_clear(section[:num+1])\n\ndef get_sections(text):\n return text.split(\"\\n\\n\")\n\ndef abs_clear(abs):\n print(\"clear---------------\",abs)\n abs=abs.strip()\n if abs[0] == \":\":\n abs = abs[1:]\n print(\"last char:\",abs[-1])\n if abs[-1] !=\".\" and abs[-1] !=\"。\":\n abs=abs+\".\"\n return abs\n\ndef create_box(pdf_path):\n path=\"C:/temp/box/\"\n images = convert_from_path(pdf_path)\n for index, img in enumerate(images):\n image_path=path+\"r\"+str(index)+\".png\"\n img.save(image_path)\n image = cv2.imread(image_path)\n box = pytesseract.image_to_data(image)\n for line in box.split(\"\\n\"):\n # print(line)\n if \"left\" in line:\n continue\n args = line.split(\"\\t\")\n if int(args[3]) != 0:\n continue\n\n cv2.rectangle(image, (int(args[6]), int(args[7])),\n (int(args[6]) + int(args[8]), int(args[7]) + int(args[9])), (255, 0, 0))\n cv2.imwrite(image_path, image)\n\n\n\ndef find_boxs(boxs,image_path):\n # print(pytesseract.image_to_data(\"C:/temp/png/page_0.jpg\"))\n image_path=\"D:/data/temp/page_0.jpg\"\n path=\"D:/data/temp\"\n image = cv2.imread(image_path)\n box = pytesseract.image_to_data(image)\n for index,line in enumerate(box.split(\"\\n\")):\n # print(line)\n if \"left\" in line:\n continue\n 
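# pytesseract.image_to_data returns TSV rows; columns 6-9 hold left, top, width, height,\n        # while args[2] (block_num) and args[3] (par_num) drive the row filters below.\n        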
args = line.split(\"\\t\")\n if int(args[3]) != 0:\n continue\n if int(args[2])==0:\n continue\n print(\"++++++++++++++++++++++\",index)\n print(\"======================\",args)\n A=(int(args[6]),int(args[7]))\n\n B=(int(args[6]) + int(args[8]), int(args[7]) + int(args[9]))\n print(int(args[6]),int(args[7]),int(args[6]) + int(args[8]),int(args[7]) + int(args[9]))\n new_image=image[int(args[7]):int(args[7]) + int(args[9]),int(args[6]):int(args[6]) + int(args[8])]\n # print(pytesseract.image_to_string(new_image))\n cv2.imwrite(path+str(index)+\".jpg\",new_image)\n # cv2.rectangle(image, (int(args[6]), int(args[7])),\n # (int(args[6]) + int(args[8]), int(args[7]) + int(args[9])), (255, 0, 0))\n\n\n\n\n\n\n\n\ndef test():\n image_path = \"C:/temp/png/page_0.jpg\"\n image = cv2.imread(image_path)\n new_image = image[200:300, 400:1000]\n print(pytesseract.image_to_string(new_image))\n cv2.imwrite( \"C:/temp/png/1.jpg\", new_image)\n\n\n\ndef nlp():\n nlp = StanfordCoreNLP(r'C:\\File\\stanford-corenlp-full-2016-10-31')\n\n sentence = \"Carlos Navarro-Retamal andJulio Caballero *\"\n print('Tokenize:', nlp.word_tokenize(sentence))\n print('Part of Speech:', nlp.pos_tag(sentence))\n print( 'Named Entities:', nlp.ner(sentence))\n print('Constituency Parsing:', nlp.parse(sentence))\n\n print('Dependency Parsing:', nlp.dependency_parse(sentence))\n\n\n\n\nif __name__ == \"__main__\":\n # create_box(\"C:/pdfs/dynamic/1a2dd1fa5d0511e9a9ca00ac37466cf9.pdf\")\n # find_boxs(None,None)\n # nlp()\n # test()\n # # print(\"a\".isalpha())\n # main('C:/temp/新建文件夹/0kKzRNRpJayT.pdf', 'C:/temp')\n # for path in ocr_paths:\n # print(\"===========\",path)\n # print(pytesseract.image_to_string(path,lang=\"jpn\"))\n # print(\"+++++++++\",get_abs(pytesseract.image_to_string(path,lang=\"jpn\")))\n # print(pytesseract.image_to_string(\"C:/temp/12.png\",lang=\"jpn\"))\n # box=pytesseract.image_to_data(\"C:/temp/page_0.png\")\n # image=cv2.imread(\"C:/temp/page_0.png\")\n #\n # print(type(box))\n # block_dict={}\n # for line in box.split(\"\\n\"):\n # # print(line)\n # if \"left\" in line:\n # continue\n # args=line.split(\"\\t\")\n # if int(args[3])!=0:\n # continue\n #\n # # block_num=args[2]\n # # num1=int(args[6])\n # # num2=int(args[6])+int(args[8])\n # # num3=int(args[7]\n # # num4=args[2]\n # cv2.rectangle(image,(int(args[6]),int(args[7])),(int(args[6])+int(args[8]),int(args[7])+int(args[9])),(255, 0, 0))\n # # print(args.__len__())\n #\n # # cv2.imshow(\"Text Detection\", image)\n # #\n # # cv2.waitKey(0)\n # cv2.imwrite(\"C:/temp/r1.png\",image)\n\n line=\"M . K o ua k ou D je , S . D a b on n e, S. Ta g ro G u eh i a n d L . P a tr ic e K o u am e\"\n pattern = \"[A-Z]\"\n new_string = re.sub(pattern, lambda x: \" \" + x.group(0),line.replace(\" a n d \",\" A n d \").replace(\" \",\"\"))\n print(new_string)\n","sub_path":"ocr/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"30469906","text":"\"\"\"empty message\n\nRevision ID: 7dd3c479c048\nRevises: ab996d2365af\nCreate Date: 2017-03-01 03:54:18.283388\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '7dd3c479c048'\ndown_revision = 'ab996d2365af'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n    op.add_column('protocoltypes', sa.Column('modify_group', sa.String(), nullable=True))\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_column('protocoltypes', 'modify_group')\n    # ### end Alembic commands ###\n","sub_path":"migrations/versions/7dd3c479c048_.py","file_name":"7dd3c479c048_.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"172024654","text":"#!/usr/bin/env python\n\nimport json\nimport urllib2\n\nexecfile('config.py')\n\ntotal_orgs = 1\nwhile total_orgs > 0:\n\n    response = urllib2.urlopen(popit_api_url + '/v0.1/organizations')\n    orglist = json.loads(response.read())\n\n    for org in orglist['result']:\n        opener = urllib2.build_opener(urllib2.HTTPHandler)\n        request = urllib2.Request(popit_api_url + '/v0.1/organizations/' + org['id'])\n        request.add_header(\"Apikey\", popit_api_key)\n        request.get_method = lambda: 'DELETE'\n        url = opener.open(request)\n\n    total_orgs = orglist['total']\n\ntotal_people = 1\nwhile total_people > 0:\n\n    response = urllib2.urlopen(popit_api_url + '/v0.1/persons')\n    personlist = json.loads(response.read())\n\n    for person in personlist['result']:\n        opener = urllib2.build_opener(urllib2.HTTPHandler)\n        request = urllib2.Request(popit_api_url + '/v0.1/persons/' + person['id'])\n        request.add_header(\"Apikey\", popit_api_key)\n        request.get_method = lambda: 'DELETE'\n        url = opener.open(request)\n\n    total_people = personlist['total']\n\ntotal_posts = 1\nwhile total_posts > 0:\n\n    response = urllib2.urlopen(popit_api_url + '/v0.1/posts')\n    postslist = json.loads(response.read())\n\n    for post in postslist['result']:\n        opener = urllib2.build_opener(urllib2.HTTPHandler)\n        request = urllib2.Request(popit_api_url + '/v0.1/posts/' + post['id'])\n        request.add_header(\"Apikey\", popit_api_key)\n        request.get_method = lambda: 'DELETE'\n        url = opener.open(request)\n\n    total_posts = postslist['total']\n","sub_path":"clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"444931132","text":"import random\nimport numpy as np\nfrom rage.Building import Building\n\n\n\nclass Office (Building):\n    def info_office(self):\n        print (\"Office data: \")\n        return '\\n'\n\n    def size_office(self):\n        office_1 = 1\n        length = np.empty(office_1)\n        width = np.empty(office_1)\n        for i in range(office_1):\n            length[i] = random.randint(1, 15)\n            width[i] = random.randint(1, 15)\n            print(\"Room 1 length:\", length[i], \" room 1 width:\", width[i])\n            # fixed: the original printed length squared while labelling it a perimeter\n            print('Perimeter:', 2 * (length[i] + width[i]))\n\n        office_2 = 1\n        length = np.empty(office_2)\n        width = np.empty(office_2)\n        for i in range(office_2):\n            length[i] = random.randint(1, 15)\n            width[i] = random.randint(1, 15)\n            print(\"Room 2 length:\", length[i], \" room 2 width:\", width[i])\n            print('Perimeter:', 2 * (length[i] + width[i]))\n\n\n        # NOTE: length/width were re-created above, so this dot product only covers the last room\n        print('Total office area:' + str(np.dot(length, width)))\n\n        return '\\n'\n\n","sub_path":"rage/Office.py","file_name":"Office.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"326145416","text":"\"\"\"TensorFlow utility methods.\"\"\"\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nimport numpy as np\nfrom functools import reduce\n\n# Stabilizing term to avoid NaN (prevents division by 
zero or log of zero)\nEPS = 1e-6\n\n\ndef make_session(num_cpu, graph=None):\n    \"\"\"Return a session that will use CPU's only.\n\n    Parameters\n    ----------\n    num_cpu : int\n        number of CPUs to use for TensorFlow\n    graph : tf.Graph\n        the graph of the session\n\n    Returns\n    -------\n    tf.compat.v1.Session\n        a tensorflow session\n    \"\"\"\n    tf_config = tf.compat.v1.ConfigProto(\n        allow_soft_placement=True,\n        inter_op_parallelism_threads=num_cpu,\n        intra_op_parallelism_threads=num_cpu)\n\n    # Prevent tensorflow from taking all the gpu memory.\n    tf_config.gpu_options.allow_growth = True\n\n    return tf.compat.v1.Session(config=tf_config, graph=graph)\n\n\ndef get_trainable_vars(name=None):\n    \"\"\"Return the trainable variables.\n\n    Parameters\n    ----------\n    name : str\n        the scope\n\n    Returns\n    -------\n    list of tf.Variable\n        trainable variables\n    \"\"\"\n    return tf.compat.v1.get_collection(\n        tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES, scope=name)\n\n\ndef get_globals_vars(name=None):\n    \"\"\"Return the global variables.\n\n    Parameters\n    ----------\n    name : str\n        the scope\n\n    Returns\n    -------\n    list of tf.Variable\n        global variables\n    \"\"\"\n    return tf.compat.v1.get_collection(\n        tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, scope=name)\n\n\ndef reduce_std(tensor, axis=None, keepdims=False):\n    \"\"\"Get the standard deviation of a Tensor.\n\n    Parameters\n    ----------\n    tensor : tf.Tensor or tf.Variable\n        the input tensor\n    axis : int or list of int\n        the axis to iterate the std over\n    keepdims : bool\n        keep the other dimensions the same\n\n    Returns\n    -------\n    tf.Tensor\n        the std of the tensor\n    \"\"\"\n    return tf.sqrt(reduce_var(tensor, axis=axis, keepdims=keepdims))\n\n\ndef reduce_var(tensor, axis=None, keepdims=False):\n    \"\"\"Get the variance of a Tensor.\n\n    Parameters\n    ----------\n    tensor : tf.Tensor\n        the input tensor\n    axis : int or list of int\n        the axis to iterate the variance over\n    keepdims : bool\n        keep the other dimensions the same\n\n    Returns\n    -------\n    tf.Tensor\n        the variance of the tensor\n    \"\"\"\n    tensor_mean = tf.reduce_mean(tensor, axis=axis, keepdims=True)\n    devs_squared = tf.square(tensor - tensor_mean)\n    return tf.reduce_mean(devs_squared, axis=axis, keepdims=keepdims)\n\n\ndef get_target_updates(_vars, target_vars, tau, verbose=0):\n    \"\"\"Get target update operations.\n\n    Parameters\n    ----------\n    _vars : list of tf.Tensor\n        the initial variables\n    target_vars : list of tf.Tensor\n        the target variables\n    tau : float\n        the soft update coefficient (keep old values, between 0 and 1)\n    verbose : int\n        the verbosity level: 0 none, 1 training information, 2 tensorflow debug\n\n    Returns\n    -------\n    tf.Operation\n        initial update\n    tf.Operation\n        soft update\n    \"\"\"\n    if verbose >= 2:\n        print('setting up target updates ...')\n\n    soft_updates = []\n    init_updates = []\n    assert len(_vars) == len(target_vars)\n\n    for var, target_var in zip(_vars, target_vars):\n        if verbose >= 2:\n            print('  {} <- {}'.format(target_var.name, var.name))\n        init_updates.append(tf.compat.v1.assign(target_var, var))\n        soft_updates.append(\n            tf.compat.v1.assign(target_var, (1.-tau) * target_var + tau * var))\n\n    assert len(init_updates) == len(_vars)\n    assert len(soft_updates) == len(_vars)\n\n    return tf.group(*init_updates), tf.group(*soft_updates)\n\n\ndef gaussian_likelihood(input_, mu_, log_std):\n    \"\"\"Compute log likelihood of a gaussian.\n\n    Here we assume this is a Diagonal Gaussian.\n\n    Parameters\n    ----------\n    input_ : tf.Variable\n        the action by the policy\n    mu_ : tf.Variable\n        the policy mean\n    
log_std : tf.Variable\n the policy log std\n\n Returns\n -------\n tf.Variable\n the log-probability of a given observation given the output action\n from the policy\n \"\"\"\n pre_sum = -0.5 * (((input_ - mu_) / (\n tf.exp(log_std) + EPS)) ** 2 + 2 * log_std + np.log(\n 2 * np.pi))\n return tf.reduce_sum(pre_sum, axis=1)\n\n\ndef apply_squashing_func(mu_, pi_, logp_pi):\n \"\"\"Squash the output of the Gaussian distribution.\n\n This method also accounts for that in the log probability. The squashed\n mean is also returned for using deterministic actions.\n\n Parameters\n ----------\n mu_ : tf.Variable\n mean of the gaussian\n pi_ : tf.Variable\n output of the policy (or action) before squashing\n logp_pi : tf.Variable\n log probability before squashing\n\n Returns\n -------\n tf.Variable\n the output from the squashed deterministic policy\n tf.Variable\n the output from the squashed stochastic policy\n tf.Variable\n the log probability of a given squashed action\n \"\"\"\n # Squash the output\n deterministic_policy = tf.nn.tanh(mu_)\n policy = tf.nn.tanh(pi_)\n\n # Squash correction (from original implementation)\n logp_pi -= tf.reduce_sum(tf.math.log(1 - policy ** 2 + EPS), axis=1)\n\n return deterministic_policy, policy, logp_pi\n\ndef apply_squashing_func_2(mu, pi, logp_pi):\n # Adjustment to log prob\n # NOTE: This formula is a little bit magic. To get an understanding of where it\n # comes from, check out the original SAC paper (arXiv 1801.01290) and look in\n # appendix C. This is a more numerically-stable equivalent to Eq 21.\n # Try deriving it yourself as a (very difficult) exercise. :)\n logp_pi -= tf.reduce_sum(2*(np.log(2) - pi - tf.nn.softplus(-2*pi)), axis=1)\n\n # Squash those unbounded actions!\n mu = tf.tanh(mu)\n pi = tf.tanh(pi)\n return mu, pi, logp_pi\n\n\ndef print_params_shape(scope, param_type):\n \"\"\"Print parameter shapes and number of parameters.\n\n Parameters\n ----------\n scope : str\n scope containing the parameters\n param_type : str\n the name of the parameter\n \"\"\"\n shapes = [var.get_shape().as_list() for var in get_trainable_vars(scope)]\n nb_params = sum([reduce(lambda x, y: x * y, shape) for shape in shapes])\n print(' {} shapes: {}'.format(param_type, shapes))\n print(' {} params: {}'.format(param_type, nb_params))\n\n\ndef layer(val,\n num_outputs,\n name,\n act_fun=None,\n kernel_initializer=slim.variance_scaling_initializer(\n factor=1.0 / 3.0, mode='FAN_IN', uniform=True),\n layer_norm=False):\n \"\"\"Create a fully-connected layer.\n\n Parameters\n ----------\n val : tf.Variable\n the input to the layer\n num_outputs : int\n number of outputs from the layer\n name : str\n the scope of the layer\n act_fun : tf.nn.* or None\n the activation function\n kernel_initializer : Any\n the initializing operation to the weights of the layer\n layer_norm : bool\n whether to enable layer normalization\n\n Returns\n -------\n tf.Variable\n the output from the layer\n \"\"\"\n val = tf.layers.dense(\n val, num_outputs, name=name, kernel_initializer=kernel_initializer)\n\n if layer_norm:\n val = tf.contrib.layers.layer_norm(val, center=True, scale=True)\n\n if act_fun is not None:\n val = act_fun(val)\n\n return val\n","sub_path":"hbaselines/utils/tf_util.py","file_name":"tf_util.py","file_ext":"py","file_size_in_byte":7551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"48234152","text":"# -*- coding: utf-8 -*-\n\n\n\nfrom django import forms\nfrom rbac import models\nfrom django.utils.safestring 
import mark_safe\nfrom rbac.forms.baseform import BaseForm\n\n\nICON_LIST = [\n\t['fa-calendar-plus-o',mark_safe('')],\n\t['fa-calendar-times-o',mark_safe('')],\n\t['fa-bug',mark_safe('')],\n\t['fa-bookmark-o',mark_safe('')],\n\t['fa-bus',mark_safe('')],\n\t['fa-cogs',mark_safe('')],\n\t['fa-copyright',mark_safe('')],\n\t['fa-envelope-open',mark_safe('')],\n\t['fa-crosshairs',mark_safe('')],\n\t['fa-flag',mark_safe('')],\n\t['fa-image',mark_safe('')],\n\t['fa-life-ring',mark_safe('')],\n\t['fa-pie-chart',mark_safe('')],\n\t['fa-road',mark_safe('')],\n]\n\n\n\nclass MenuModelForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = models.Menu\n\t\tfields = ['title','icon']\n\t\twidgets = {\n\t\t\t'title':forms.TextInput(attrs={'class':'form-control'}),\n\t\t\t'icon':forms.RadioSelect(\n\t\t\t\tchoices=ICON_LIST,\n\t\t\t\tattrs={'class':'clearfix'}\n\n\t\t\t)\n\t\t}\n\n\nclass SecondMenuModelForm(BaseForm):\n\tclass Meta:\n\t\tmodel = models.Permission\n\t\texclude = ['pid',]\n\nclass PermissionModelForm(BaseForm):\n\tclass Meta:\n\t\tmodel = models.Permission\n\t\tfields = ['title','url','name']\n\n\n\nclass MultiAddPermissionForm(forms.Form):\n\ttitle = forms.CharField(\n\t\twidget=forms.TextInput(attrs={'class': \"form-control\"})\n\t)\n\turl = forms.CharField(\n\t\twidget=forms.TextInput(attrs={'class': \"form-control\"})\n\t)\n\tname = forms.CharField(\n\t\twidget=forms.TextInput(attrs={'class': \"form-control\"})\n\t)\n\tmenu_id = forms.ChoiceField(\n\t\tchoices=[(None, '-----')],\n\t\twidget=forms.Select(attrs={'class': \"form-control\"}),\n\t\trequired=False,\n\t)\n\tpid_id = forms.ChoiceField(\n\t\tchoices=[(None, '-----')],\n\t\twidget=forms.Select(attrs={'class': \"form-control\"}),\n\t\trequired=False,\n\t)\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper().__init__(*args, **kwargs)\n\t\tself.fields['menu_id'].choices += models.Menu.objects.values_list('id', 'title')\n\t\tself.fields['pid_id'].choices += models.Permission.objects.filter(pid__isnull=True).exclude(\n\t\t\tmenu__isnull=True).values_list('id', 'title')\n\n\n\nclass MultiEditPermissionForm(forms.Form):\n\tid = forms.IntegerField(\n\t\twidget=forms.HiddenInput()\n\t)\n\ttitle = forms.CharField(\n\t\twidget=forms.TextInput(attrs={'class': \"form-control\"})\n\t)\n\turl = forms.CharField(\n\t\twidget=forms.TextInput(attrs={'class': \"form-control\"})\n\t)\n\tname = forms.CharField(\n\t\twidget=forms.TextInput(attrs={'class': \"form-control\"})\n\t)\n\tmenu_id = forms.ChoiceField(\n\t\tchoices=[(None, '-----')],\n\t\twidget=forms.Select(attrs={'class': \"form-control\"}),\n\t\trequired=False,\n\t)\n\tpid_id = forms.ChoiceField(\n\t\tchoices=[(None, '-----')],\n\t\twidget=forms.Select(attrs={'class': \"form-control\"}),\n\t\trequired=False,\n\t)\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper().__init__(*args, **kwargs)\n\t\tself.fields['menu_id'].choices += models.Menu.objects.values_list('id', 'title')\n\t\tself.fields['pid_id'].choices += models.Permission.objects.filter(pid__isnull=True).exclude(\n\t\t\tmenu__isnull=True).values_list('id', 'title')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"seventh_module/CRM_NEW/cmdb/rbac/forms/menuform.py","file_name":"menuform.py","file_ext":"py","file_size_in_byte":3636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"396138620","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 23 21:00:13 2019\n\nmodule: experiment module - this is the module that runs everything and controls kv's\n\n@author: 
young\n\"\"\"\n\n\nimport pandas as pd\nimport pyomo.environ as pe\nimport time\nimport math\nimport os.path\nfrom datetime import datetime\nimport multiprocessing\nimport pickle as pkl\nimport sys\nimport numpy as np\nfrom random import randrange\n\n#report the clock time that the experiment was started\nprint(\"Experiment commenced at: \", time.ctime())\nstart=time.time()\n\nfrom CreateModel import model\nimport CreateModel as crtmod\nimport BoundsPyomo as bdypy\nimport UniversalInputs as uinp\nimport PropertyInputs as pinp\nimport Sensitivity as sen\nimport Functions as fun\nimport RotationPyomo as rotpy\nimport CropPyomo as crppy\nimport MachPyomo as macpy\nimport FinancePyomo as finpy\nimport LabourFixedPyomo as lfixpy\nimport LabourPyomo as labpy\nimport LabourCropPyomo as lcrppy\nimport PasturePyomo as paspy\nimport SupFeedPyomo as suppy\nimport StubblePyomo as stubpy\nimport StockPyomo as spy\nimport CorePyomo as core\n\nstart_time1 = time.time()\n\n\n#########################\n#load pickle # \n#########################\n##try to load in params dict, if it doesn't exist then create a new dict\ntry:\n with open('pkl_params.pkl', \"rb\") as f:\n params = pkl.load(f)\nexcept FileNotFoundError:\n params={}\nprev_params = params.copy() #make a copy to compare with\n##try to load in Previous Exp.xlsx file to dict, if it doesn't exist then create a new dict\ntry:\n with open('pkl_exp.pkl', \"rb\") as f:\n prev_exp = pkl.load(f)\nexcept FileNotFoundError:\n prev_exp=pd.DataFrame()\n\nif __name__ == '__main__':\n ##try to load in results file to dict, if it doesn't exist then create a new dict - isn't used by multiprocess therefore only needs to be loaded with main\n try:\n with open('pkl_lp_vars.pkl', \"rb\") as f:\n lp_vars = pkl.load(f)\n except FileNotFoundError:\n lp_vars={}\n ##try to load in results file to dict, if it doesn't exist then create a new dict\n try:\n with open('pkl_r_vals.pkl', \"rb\") as f:\n r_vals = pkl.load(f)\n except FileNotFoundError:\n r_vals={}\n\n \n#########################\n#load exp # \n#########################\n##read in exp and drop all false runs ie runs not being run this time\nexp_data = fun.f_read_exp()\nexp_data = exp_data.sort_index() #had to sort to stop performance warning, this means runs may not be executed in order of exp.xlsx\nexp_data1=exp_data.copy() #copy made so that the run col can be added - the original df is used to allocate sa values (would cause an error if run col existed but i cant drop it because it is used to determine if the trial is run)\n\n\n\n##check if precalcs and pyomo need to be recalculated.\n##precalcs are rerun if\n## 1. exp.xlsx has changed\n## 2. any python module has been updated\n## 3. the trial needed to be run last time but the user opted not to run that trial\n\nexp_data1 = fun.f_run_required(prev_exp, exp_data1)\n\n\n#########################\n#Exp loop #\n#########################\n#^maybe there is a cleaner way to do some of the stuff below ie a way that doesn't need as many if statements?\ndef exp(row):\n ##sleep for random length of time. 
This is to offset processes with a goal of spreading the RAM load\n time.sleep(randrange(80))\n\n ##start timer for each loop\n start_time = time.time()\n for dic,key1,key2,indx in exp_data:\n ##extract current value\n value = exp_data.loc[exp_data.index[row], (dic,key1,key2,indx)]\n ##checks if both slice and key2 exists\n if not ('Unnamed' in indx or 'Unnamed' in key2):\n indices = tuple(slice(*(int(i) if i else None for i in part.strip().split(':'))) for part in indx.split(\n ',')) # creates a slice object from a string - note slice objects are not inclusive ie to select the first number it should look like [0:1]\n if dic == 'sam':\n sen.sam[(key1, key2)][indices] = value\n elif dic == 'saa':\n sen.saa[(key1, key2)][indices] = value\n elif dic == 'sap':\n sen.sap[(key1, key2)][indices] = value\n elif dic == 'sar':\n sen.sar[(key1, key2)][indices] = value\n elif dic == 'sat':\n sen.sat[(key1, key2)][indices] = value\n elif dic == 'sav':\n sen.sav[(key1, key2)][indices] = value\n\n ##checks if just slice exists\n elif not 'Unnamed' in indx:\n indices = tuple(slice(*(int(i) if i else None for i in part.strip().split(':'))) for part in indx.split(\n ',')) # creates a slice object from a string - note slice objects are not inclusive ie to select the first number it should look like [0:1]\n if dic == 'sam':\n sen.sam[key1][indices] = value\n elif dic == 'saa':\n sen.saa[key1][indices] = value\n elif dic == 'sap':\n sen.sap[key1][indices] = value\n elif dic == 'sar':\n sen.sar[key1][indices] = value\n elif dic == 'sat':\n sen.sat[key1][indices] = value\n elif dic == 'sav':\n sen.sav[key1][indices] = value\n ##checks if just key2 exists\n elif not 'Unnamed' in key2:\n if dic == 'sam':\n sen.sam[(key1, key2)] = value\n elif dic == 'saa':\n sen.saa[(key1, key2)] = value\n elif dic == 'sap':\n sen.sap[(key1, key2)] = value\n elif dic == 'sar':\n sen.sar[(key1, key2)] = value\n elif dic == 'sat':\n sen.sat[(key1, key2)] = value\n elif dic == 'sav':\n sen.sav[(key1, key2)] = value\n ##if just key1 exists\n else:\n if dic == 'sam':\n sen.sam[key1] = value\n elif dic == 'saa':\n sen.saa[key1] = value\n elif dic == 'sap':\n sen.sap[key1] = value\n elif dic == 'sar':\n sen.sar[key1] = value\n elif dic == 'sat':\n sen.sat[key1] = value\n elif dic == 'sav':\n sen.sav[key1] = value\n\n ##call sa functions - assigns sa variables to relevant inputs\n uinp.univeral_inp_sa()\n pinp.property_inp_sa()\n ##create empty dicts - have to do it here because need the trial as the first key, so whole trial can be compared when determining if pyomo needs to be run\n ###params\n params={}\n params['pas']={}\n params['rot']={}\n params['crop']={}\n params['mach']={}\n params['fin']={}\n params['labfx']={}\n params['lab']={}\n params['crplab']={}\n params['sup']={}\n params['stub']={}\n params['stock']={}\n ###report values\n r_vals={}\n r_vals['pas']={}\n r_vals['rot']={}\n r_vals['crop']={}\n r_vals['mach']={}\n r_vals['fin']={}\n r_vals['labfx']={}\n r_vals['lab']={}\n r_vals['crplab']={}\n r_vals['sup']={}\n r_vals['stub']={}\n r_vals['stock']={}\n ##call precalcs\n paspy.paspyomo_precalcs(params['pas'],r_vals['pas'])\n rotpy.rotation_precalcs(params['rot'],r_vals['rot'])\n crppy.crop_precalcs(params['crop'],r_vals['crop'])\n macpy.mach_precalcs(params['mach'],r_vals['mach'])\n finpy.fin_precalcs(params['fin'],r_vals['fin'])\n lfixpy.labfx_precalcs(params['labfx'],r_vals['labfx'])\n labpy.lab_precalcs(params['lab'],r_vals['lab'])\n lcrppy.crplab_precalcs(params['crplab'],r_vals['crplab'])\n 
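# The *_precalcs calls above and below each fill one sub-dict of params/r_vals; the\n    # matching *pyomo_local call consumes it further down when the trial needs a pyomo rebuild.\n    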
suppy.sup_precalcs(params['sup'],r_vals['sup'])\n    stubpy.stub_precalcs(params['stub'],r_vals['stub'])\n    spy.stock_precalcs(params['stock'],r_vals['stock'])\n\n    ##does pyomo need to be run?\n    ##check if the two dicts are the same, it is possible that the current dict has less keys than the previous dict eg if a value becomes nan (because you removed the cell in excel inputs) and when it is stacked it disappears (this is very unlikely though so not going to test for it since this step is already slow)\n    try: #try required in case the key (trial) doesn't exist in the old dict, if this is the case pyomo must be run\n        run_pyomo_params=fun.findDiff(params, prev_params[exp_data.index[row][2]])\n    except KeyError:\n        run_pyomo_params= True\n    ##determine if pyomo should run, note if pyomo doesn't run there will be no full solution (they are the same as before so no need)\n    lp_vars={} #create empty dict to return if pyomo isn't run\n    if run_pyomo_params or exp_data1.loc[exp_data1.index[row],'runpyomo'].squeeze():\n        ##call core model function, must call them in the correct order (core must be last)\n        crtmod.sets() #certain sets have to be updated each iteration of exp\n        rotpy.rotationpyomo(params['rot'])\n        crppy.croppyomo_local(params['crop'])\n        macpy.machpyomo_local(params['mach'])\n        finpy.finpyomo_local(params['fin'])\n        lfixpy.labfxpyomo_local(params['labfx'])\n        labpy.labpyomo_local(params['lab'])\n        lcrppy.labcrppyomo_local(params['crplab'])\n        paspy.paspyomo_local(params['pas'])\n        suppy.suppyomo_local(params['sup'])\n        stubpy.stubpyomo_local(params['stub'])\n        spy.stockpyomo_local(params['stock'])\n        ###bounds-this must be done last because it uses sets built in some of the other modules\n        bdypy.boundarypyomo_local()\n        results=core.coremodel_all() #have to do this so i can access the solver status\n    \n    ##check if user wants full solution\n    if exp_data.index[row][1] == True:\n        ##make lp file\n        model.write('Output\%s.lp' %exp_data.index[row][2],io_options={'symbolic_solver_labels':True}) #file name has to have capital\n        \n        ##write rc and dual to txt file\n        with open('Output\Rc and Duals - %s.txt' %exp_data.index[row][2],'w') as f: #file name has to have capital\n            f.write('RC\n') \n            for v in model.component_objects(pe.Var, active=True):\n                f.write(\"Variable %s\n\" %v) # \n makes new line\n                for index in v:\n                    try:\n                        print(\"   \", index, model.rc[v[index]], file=f)\n                    except: pass \n            f.write('Dual\n') #this can be used in search to find the start of this in the txt file \n            for c in model.component_objects(pe.Constraint, active=True):\n                f.write(\"Constraint %s\n\" %c) # \n makes new line\n                for index in c:\n                    # try:\n                    print(\"   \", index, model.dual[c[index]], file=f)\n                    # except: pass \n        ##prints what you see from pprint to txt file - you can see the slack on constraints but not the rc or dual\n        with open('Output\Full model - %s.txt' %exp_data.index[row][2], 'w') as f: #file name has to have capital\n            f.write(\"My description of the instance!\n\")\n            model.display(ostream=f)\n    \n    ##This writes variable with value greater than 1 to txt file, the file is overwritten each time - used to check stuff out each iteration if you want \n    file = open('Output\Variable summary.txt','w') #file name has to have capital\n    file.write('Trial: %s\n'%exp_data.index[row][2]) #the first line is the name of the trial\n    for v in model.component_objects(pe.Var, active=True):\n        file.write(\"Variable %s\n\" %v) # \n makes new line\n        for index in v:\n            try:\n                if v[index].value>0:\n                    file.write (\"   %s %s\n\" %(index, v[index].value))\n            except: pass \n    file.close()\n    ##this prints stuff for each trial - trial name, overall profit\n    print(\"\nDisplaying Solution for trial: %s\n\" %exp_data.index[row][2] , '-'*60,'\n%s' %pe.value(model.profit))\n    ##this checks whether the solver is optimal - if infeasible or error the model will quit\n    if (results.solver.status == pe.SolverStatus.ok) and (results.solver.termination_condition == pe.TerminationCondition.optimal):\n        print('solver optimal')# Do nothing when the solution is optimal and feasible\n    elif (results.solver.termination_condition == pe.TerminationCondition.infeasible):\n        print ('Solver Status: infeasible')\n        sys.exit()\n    else: # Something else is wrong\n        print ('Solver Status: error')\n        sys.exit()\n    ##store profit\n    r_vals['profit'] = pe.value(model.profit)\n    #last step is to print the time for the current trial to run\n    variables = model.component_objects(pe.Var, active=True)\n    lp_vars = {str(v):{s:v[s].value for s in v} for v in variables } #creates dict with variable in it. This is tricky since pyomo returns a generator object\n    ##determine expected time to completion - trials left multiplied by average time per trial & time for current loop\n    dataset = list(np.flatnonzero(np.array(exp_data.index.get_level_values(0)) * np.array(exp_data1['run']))) #gets the ordinal index values for the trials the user wants to run that are not up to date\n    processes = multiprocessing.cpu_count()\n    total_batches = math.ceil(len(dataset) / processes )\n    current_batch = math.ceil( (dataset.index(row)+1) / processes ) #add 1 because python starts at 0\n    remaining_batches = total_batches - current_batch\n    time_taken = time.time() - start_time1\n    batch_time = time_taken / current_batch\n    time_remaining = remaining_batches * batch_time\n    end_time = time.time()\n    print(\"total time taken this loop: \", end_time - start_time)\n    print('Time remaining: %s' %time_remaining)\n\n    return lp_vars, params, r_vals\n\n##3 - works when run through anaconda prompt - if 9 runs and 8 processors, the first processor to finish will start the 9th run\n#using map it returns outputs in the order they go in ie in the order of the exp\n##the result after the different processes are done is a list of dicts (because each iteration returns a dict and the multiprocess stuff returns a list)\ndef main():\n    ## Define the dataset - trials that require at least the precalcs done (user wants it run and it is out of date)\n    dataset = list(np.flatnonzero(np.array(exp_data.index.get_level_values(0)) * np.array(exp_data1['run']))) #gets the ordinal index values for the trials the user wants to run that are not up to date\n    ##prints out start status - number of trials to run, date and time exp.xl was last saved and output summary \n    print('Number of trials to run: ',len(dataset))\n    print('Number of full solutions: ',sum((exp_data.index[row][1] == True) and (exp_data.index[row][0] == True) for row in range(len(exp_data))))\n    print('Exp.xlsx last saved: ',datetime.fromtimestamp(round(os.path.getmtime(\"Exp.xlsx\"))))\n    ##start multiprocessing\n    agents = min(multiprocessing.cpu_count(),len(dataset)) # number of agents (processes) should be min of the num of cpus or trial\n    with multiprocessing.Pool(processes=agents) as pool:\n        result = pool.map(exp, dataset)\n    ##update run require status - trials just run are now up to date for both pyomo and precalcs - all trials that the user wanted to run are now up to date (even if they didn't run because they were already up to date)\n    
exp_data1.loc[exp_data1.index[dataset],['run']] = False\n    exp_data1.loc[exp_data1.index[dataset],['runpyomo']] = False\n    ##return pyomo results and params dict\n    return dataset, result, exp_data1\n\nif __name__ == '__main__':\n    dataset, results, exp_data1 = main() #returns a list in the same order as exp\n    ##turn list of dicts into nested dict with trial name as key\n    for trial_row, result, res_num in zip(dataset,results,range(len(results))):\n        if any(results[res_num][0]): # only do this if pyomo was run and the dict contains values\n            lp_vars[exp_data.index[trial_row][2]] = results[res_num][0]\n        params[exp_data.index[trial_row][2]] = results[res_num][1] \n        r_vals[exp_data.index[trial_row][2]] = results[res_num][2] \n    ##drop results into pickle file\n    with open('pkl_lp_vars.pkl', \"wb\") as f:\n        pkl.dump(lp_vars, f)\n    with open('pkl_params.pkl', \"wb\") as f:\n        pkl.dump(params, f)\n    with open('pkl_r_vals.pkl', \"wb\") as f:\n        pkl.dump(r_vals, f)\n    with open('pkl_exp.pkl', \"wb\") as f:\n        pkl.dump(exp_data1, f)\n\n\n    end=time.time()\n    print('total time',end-start)\n\n\n\n\n\n\n\n","sub_path":"Exp1.py","file_name":"Exp1.py","file_ext":"py","file_size_in_byte":16619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"358087616","text":"# Moving a cell range\n\nfrom openpyxl import load_workbook\nwb = load_workbook(\"sample.xlsx\")\nws = wb.active\n\n# columns: number, English, math -> number, (Korean), English, math\nws.move_range(\"B1:C11\", rows=0, cols=1) # move B1:C11 by 0 rows and 1 column\nws[\"B1\"].value = \"국어\" # write \"국어\" (Korean) into cell B1\n\n# ws.move_range(\"C1:C11\", rows=5, cols=-1)\n\nwb.save(\"sample_move.xlsx\")","sub_path":"rpa_practice/1_excel/9_move.py","file_name":"9_move.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"230502440","text":"''' \nThis script demonstrates smoothing 2-d data when there is a known \ndiscontinuity in the underlying signal.\n'''\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom rbf.filter import filter\nimport rbf.halton\nnp.random.seed(1)\n\ndef signal(x):\n  ''' \n  this signal has a discontinuity from (0.0,-2.0) to (0.0,2.0)\n  '''\n  a = np.arctan2(x[:,0], 2 + x[:,1])\n  b = np.arctan2(x[:,0],-2 + x[:,1])\n  return (a - b)/np.pi\n\n# define the known discontinuity\nbnd_vert = np.array([[0.0, 2.0],[0.0,-2.0]])\nbnd_smp = np.array([[0,1]]) \n# create synthetic data\npnts_obs = 8*(rbf.halton.halton(200,2,start=1) - 0.5)\nsigma_obs = 0.2*np.ones(200)\nu_obs = signal(pnts_obs) + np.random.normal(0.0,sigma_obs)\n# find the filtered solution\ncutoff = 0.5\nsoln,_ = filter(pnts_obs,u_obs,sigma=sigma_obs,cutoff=cutoff,\n                vert=bnd_vert,smp=bnd_smp)\n# plot the results and true signal\nvals = np.linspace(-4,4,200) \ngrid = np.reshape(np.meshgrid(vals,vals),(2,200**2)).T\nfig,axs = plt.subplots(2,1,figsize=(6,10))\naxs[0].scatter(pnts_obs[:,0],pnts_obs[:,1],c=u_obs,s=50,\n               vmin=-1.0,vmax=1.0,cmap='viridis',zorder=1)\np = axs[0].tripcolor(grid[:,0],grid[:,1],signal(grid),\n                     vmin=-1.0,vmax=1.0,cmap='viridis',zorder=0)\naxs[0].plot(bnd_vert[:,0],bnd_vert[:,1],'r-',lw=4)\naxs[0].set_xlim((-4,4));axs[0].set_ylim((-4,4))\naxs[0].set_aspect('equal')\naxs[0].set_title(u'observed data and true signal')\nplt.colorbar(p,ax=axs[0])\naxs[1].scatter(pnts_obs[:,0],pnts_obs[:,1],c=soln,s=50,\n               vmin=-1.0,vmax=1.0,cmap='viridis')\np = axs[1].tripcolor(grid[:,0],grid[:,1],signal(grid),\n               
vmin=-1.0,vmax=1.0,cmap='viridis',zorder=0)\naxs[1].plot(bnd_vert[:,0],bnd_vert[:,1],'r-',lw=4)\naxs[1].set_xlim((-4,4));axs[1].set_ylim((-4,4))\naxs[1].set_aspect('equal')\naxs[1].set_title(r'filtered data and true signal ($\mathregular{\omega_c=%s}$)' % cutoff)\nplt.colorbar(p,ax=axs[1])\nplt.tight_layout()\nplt.savefig('../figures/filter.c.png')\nplt.show()\n","sub_path":"prj_src/nn_examples/rbf_examples/RBF-master/docs/scripts/filter.c.py","file_name":"filter.c.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"503757411","text":"# https://atcoder.jp/contests/typical90/tasks/typical90_a\n# https://twitter.com/e869120/status/1377027868518064129/photo/1\n\n# WA. Even after reading the editorial I didn't fully understand this one\nn, l = map(int, input().split())\nk = int(input())\na = list(map(int, input().split()))\n\n\n# what the pieces look like if we cut at every marked position\nsubsets = []\nfor i in range(len(a)):\n    if i == 0:\n        subsets.append(a[i] - 0)\n    else:\n        subsets.append(a[i] - a[i-1])\n    if a[i] == a[-1]:\n        subsets.append(l - a[i])\n\nsubsets = sorted(subsets, reverse=True)\n\nyokans = [0] * (k + 1)\nuser_id = 0\nfor i in range(len(subsets)):\n    user_id += 1\n    yokans[user_id - 1] += subsets[i]\n    # print(i, user_id)\n    if user_id == (k + 1):\n        user_id = 0\n\nprint(subsets)\nprint(yokans)\nprint(min(yokans))\n","sub_path":"atcoder/typical90_a.py","file_name":"typical90_a.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"501426682","text":"import numpy as np\nimport pandas as pd\nfrom collections import Counter\nfrom random import shuffle\nimport os\n\nfinal_data = []\nleng = len(os.listdir(os.getcwd() + '/data/')) // 3\nprocessed = 0\nfor filename in os.listdir(os.getcwd() + '/data/'):\n    print(filename)\n    train_data = np.load('data/' + filename)\n\n    all_data = []\n    for data in train_data:\n        img = data[0]\n        choice = data[1]\n\n        all_data.append([img,choice])\n\n    final_data += all_data\n\nchunks = np.array_split(final_data, 5)\nfor i in range(len(chunks)): \n    np.save('data/training_data-{}.npy'.format(i), chunks[i])\n\n","sub_path":"merge_data.py","file_name":"merge_data.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"352417609","text":"from django.contrib import admin\nfrom django.contrib.auth import admin as auth_admin\nfrom django.contrib.auth import get_user_model\nfrom django.utils.translation import gettext_lazy as _\n\nfrom server.organizations.admin import OrganizationMemberTabularInline\nfrom server.schools.admin import SchoolMemberTabularInline\nfrom server.users.forms import UserChangeForm\nfrom server.users.helpers import send_user_invite\n\nfrom .models import (\n    Consumer,\n    ConsumerProfile,\n    Coordinator,\n    CoordinatorProfile,\n    Instructor,\n    InstructorProfile,\n    Supervisor,\n    SupervisorProfile,\n    Vendor,\n    VendorProfile,\n)\n\nUser = get_user_model()\n\n\ndef send_invite(self, request, queryset):\n    for user in queryset:\n        send_user_invite(user.email)\n\n\nsend_invite.short_description = \"Invite user\"\n\n\n@admin.register(User, Supervisor)\nclass BaseUserTypesAdmin(auth_admin.UserAdmin):\n    form = UserChangeForm\n    fieldsets = (\n        (_(\"Account info\"), {\"fields\": (\"slug\", \"email\", \"password\", \"user_type\")}),\n        (_(\"Personal info\"), {\"fields\": (\"name\",)}),\n        (\n            _(\"Permissions\"),\n            {\n                \"fields\": (\n                    \"is_active\",\n                    \"is_staff\",\n                    
\"is_superuser\",\n \"groups\",\n \"user_permissions\",\n ),\n },\n ),\n (_(\"Important dates\"), {\"fields\": (\"last_login\", \"date_joined\")}),\n )\n add_fieldsets = (\n (None, {\"classes\": (\"wide\",), \"fields\": (\"email\", \"password1\", \"password2\")}),\n )\n\n list_display = [\"email\", \"slug\"]\n search_fields = [\"email\"]\n actions = [send_invite]\n\n\n@admin.register(Coordinator, Consumer)\nclass SchoolUserTypesAdmin(BaseUserTypesAdmin):\n inlines = [SchoolMemberTabularInline]\n\n\n@admin.register(Instructor, Vendor)\nclass OrgUserTypesAdmin(BaseUserTypesAdmin):\n inlines = [OrganizationMemberTabularInline]\n\n\nadmin.site.register(CoordinatorProfile)\nadmin.site.register(ConsumerProfile)\nadmin.site.register(InstructorProfile)\nadmin.site.register(VendorProfile)\nadmin.site.register(SupervisorProfile)\n","sub_path":"server/server/users/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"11024927","text":"import time\n\nstart_time = time.time()\nclass BSTNode:\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n\n # Insert the given value into the tree\n def insert(self, value):\n # if greater, go right\n if value >= self.value:\n # check if right exists\n if self.right is not None:\n # recursive\n self.right.insert(value)\n # if not, create a node with that value and set as right child\n else:\n new_node = BSTNode(value)\n self.right = new_node\n # else go left\n else:\n # check if left exists\n if self.left is not None:\n # recursive\n self.left.insert(value)\n else: \n new_node = BSTNode(value)\n self.left = new_node\n \n \n\n def contains(self, target):\n # when we start searching, slef will be the root\n # compare the target against self\n if target == self.value:\n return True\n if target < self.value:\n if self.left == None:\n return False\n else:\n return self.left.contains(target)\n else:\n if self.right == None:\n return False\n return self.right.contains(target)\n\n # Return the maximum value found in the tree\n\n def get_max(self):\n if self.right:\n print(\"\\nself.value: \", self.value)\n return self.right.get_max()\n\n return self.value\n\n # Call the function `fn` on the value of each node\n def for_each(self, fn):\n fn(self.value)\n\n if self.left:\n self.left.for_each(fn)\n\n if self.right:\n self.right.for_each(fn)\n\nf = open('names_1.txt', 'r')\nnames_1 = f.read().split(\"\\n\") # List containing 10000 names\nf.close()\n\nf = open('names_2.txt', 'r')\nnames_2 = f.read().split(\"\\n\") # List containing 10000 names\nf.close()\n\nduplicates = [] # Return the list of duplicates in this data structure\n\n# Replace the nested for loops below with your improvements\n\"\"\" Runtime 7.91s \"\"\"\n# for name_1 in names_1:\n # for name_2 in names_2:\n # if name_1 == name_2:\n # duplicates.append(name_1)\n\n\"\"\" FINAL RUNTIME 1.11s \"\"\"\n# wrote BSTNode above\nbst = BSTNode(\"empty_Node\")\nfor name_1 in names_1:\n bst.insert(name_1)\n\nfor name_2 in names_2:\n if bst.contains(name_2):\n duplicates.append(name_2)\n\n# Stretch Goal\n\"\"\" Runtime 1.25s \"\"\"\n# duplicates = [ name for name in names_1 if name in names_2 ] \n\nend_time = time.time()\nprint (f\"{len(duplicates)} duplicates:\\n\\n{', '.join(duplicates)}\\n\\n\")\nprint (f\"runtime: {end_time - start_time} seconds\")\n\n# ---------- Stretch Goal -----------\n# Python has built-in tools that allow for a very efficient approach to this 
problem\n# What's the best time you can accomplish? There are no restrictions on techniques or data\n# structures, but you may not import any additional libraries that you did not write yourself.\n\n","sub_path":"names/names.py","file_name":"names.py","file_ext":"py","file_size_in_byte":3090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"301618600","text":"#!/usr/bin/env python\nimport argparse\n\nfrom rtm_args import argconfig, parser_debug\nfrom rtm_build import rtm_build, rtm_build_default\n\n\ndescription = \"\"\"\\\nRTM builder in default mode.\nBuild, compile and install RTM for dummies.\n\"\"\"\n\n\n# default values for rtm_buildbox, seeded from rtm_build_default\nrtm_buildbox_default = {}\nrtm_buildbox_default.update( rtm_build_default )\nrtm_buildbox_default.update({\n 'svn_url' : argconfig(argkey='svn-url', dest='svn_url').get_default(),\n 'build_type' : 'intelgpu',\n 'cmake' : 'optimized-intel',\n 'module' : 'hudson-modules-gpu',\n})\n\n\nclass rtm_buildbox(rtm_build):\n\n def __init__(self, *args, **kwargs):\n\n # get default values\n self.update( {k: v for k, v in rtm_buildbox_default.items()} )\n\n # init of base class: init from args and kwargs\n super(rtm_buildbox, self).__init__(*args, **kwargs) \n\n\n\ndef main_args(args):\n\n a = rtm_buildbox( **vars(args) )\n\n a()\n\n\n\n# parse command line options\nparser_rtm_buildbox = argparse.ArgumentParser(prog='rtm_buildbox',\n description=description,\n parents=[parser_debug],\n conflict_handler='resolve',\n formatter_class=argparse.RawTextHelpFormatter)\n\n# svn options\n\n_parser_svn = parser_rtm_buildbox.add_argument_group(\"subversion options\")\n\n_parser_svn.add_argument('--svn-url',\n dest = 'svn_url',\n action = argconfig,\n argkey = 'svn-url',\n help = 'Define subversion url. (default %(default)s)')\n\n_parser_svn.add_argument('--svn-revision',\n dest = 'svn_revision',\n help = 'Define subversion revision.')\n\n# install options\n\n_parser_install = parser_rtm_buildbox.add_argument_group(\"install options\")\n\n_parser_install.add_argument('--install-dir',\n dest = 'dir_install',\n help = 'Define install directory. 
Root directory of RTM installation (Install directory includes directories: bin, include, lib).')\n\n\n\ndef main():\n\n args = parser_rtm_buildbox.parse_args()\n\n main_args(args)\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"rtm_py/rtm_buildbox.py","file_name":"rtm_buildbox.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"629899422","text":"# coding:utf-8\nimport datetime\nimport functools\nimport time\nimport hashlib\nimport base64\nimport hmac\n\nfrom connect import session\n\n\nHost = \"192.168.1.88:8002\"\nSESSION_EXPIRES_SECONDS = 3600 # session data lifetime, in seconds\nHOME_PAGE_DATA_REDIS_EXPIRE_SECOND = 7200 # home-page cache expiry time, in seconds\n\nACCESS_TIMES = 15 # allowed SMS verification-code requests\nLIMIT_TIMES_REQUESTS = 600 # SMS request rate-limit window\n\nTASK_INVITE_COIN = 200 # coins awarded for the invite task\nTASK_INVITE_INTEGRAL = 30 # points awarded for the invite task\n\nDAILY_TASK_SIGN_ID = 1 # daily task -- sign-in id\nDAILY_TASK_SIGN_COIN = 100 # coins awarded for the daily sign-in\nDAILY_TASK_SIGN_EXP = 30 # experience (points) awarded for the daily sign-in\n\nSHARE_COIN = 200 # coins awarded for sharing\nSHARE_INTEGRAL = 30 # points awarded for sharing\n\nWEEK_TASK_ID = 6 # wake-up task id\n\nDEFINE_IMG = \"http://mdhtdsp.oss-cn-beijing.aliyuncs.com/center/60a394f2-3182-484c-8287-6c30d6ef79d3.png\"\nCOIN_RMB = 100000\nKEY = \"MDHT3029W\"\n\nMOVIE_SPEND = 10000\n# def required_login(fun):\n# # keep the decorated function's __name__ unchanged\n# @functools.wraps(fun)\n# def wrapper(request_handler_obj, *args, **kwargs):\n# # call get_current_user to check whether the user is logged in\n# # try:\n# # if request_handler_obj.get_current_user()['user_id']:\n# # fun(request_handler_obj, *args, **kwargs)\n# # except Exception as e:\n# # if e:\n# # request_handler_obj.write(dict(errcode='404', errmsg=\"user not logged in\"))\n# # print(request_handler_obj.get_current_user()[\"code\"])\n# if not request_handler_obj.get_current_user():\n# # session = Session(request_handler_obj)\n# # if not session.data:\n# request_handler_obj.write(dict(code=1, errcode='404', errmsg=\"user not logged in\"))\n# else:\n# fun(request_handler_obj, *args, **kwargs)\n# return wrapper\n\n#\n# def required_login(fun):\n# @functools.wraps(fun)\n# def wrapper(request_handler_obj, *args, **kwargs):\n# response = request_handler_obj.get_current_user()\n# print(response)\n# if response[\"code\"] == 1:\n# request_handler_obj.finish({\"code\": 7, \"msg\": \"please request a new token\"})\n# return\n# elif response[\"code\"] == 0:\n# fun(request_handler_obj, *args, **kwargs)\n# else:\n# request_handler_obj.finish({\"code\": 1, \"msg\": \"you are not logged in, please log in again\"})\n# return\n# return wrapper\n\ndef required_login(fun):\n @functools.wraps(fun)\n def wrapper(request_handler_obj, *args, **kwargs):\n response = request_handler_obj.get_current_user()\n print(response)\n if response == 1:\n request_handler_obj.finish({\"code\": 7, \"msg\": \"please request a new token\"})\n return\n elif response == 0:\n fun(request_handler_obj, *args, **kwargs)\n else:\n request_handler_obj.finish({\"code\": 1, \"msg\": \"you are not logged in, please log in again\"})\n return\n return wrapper\n\n# generate a token\ndef generate_token(key, expire=864000):\n r'''\n @Args:\n key: str (a key chosen by the caller; keep it so the token can be verified later -- the same key can be used every time a token is generated)\n expire: int (maximum validity period, in seconds)\n @Return:\n state: str\n '''\n ts_str = str(time.time() + expire)\n ts_byte = ts_str.encode(\"utf-8\")\n sha1_tshexstr = hmac.new(key.encode(\"utf-8\"),ts_byte,'sha1').hexdigest()\n token = ts_str+':'+sha1_tshexstr\n b64_token = base64.urlsafe_b64encode(token.encode(\"utf-8\"))\n return b64_token.decode(\"utf-8\")\n\n# verify a token\n\ndef certify_token(key, token):\n r'''\n @Args:\n key: str\n token: str\n @Returns:\n boolean\n '''\n token_str = 
base64.urlsafe_b64decode(token).decode('utf-8')\n token_list = token_str.split(':')\n if len(token_list) != 2:\n return False\n ts_str = token_list[0]\n if float(ts_str) < time.time():\n return False\n known_sha1_tsstr = token_list[1]\n sha1 = hmac.new(key.encode(\"utf-8\"), ts_str.encode('utf-8'), 'sha1')\n calc_sha1_tsstr = sha1.hexdigest()\n if calc_sha1_tsstr != known_sha1_tsstr:\n return False\n return True\n\n\ndef get_invitecoin(invite_num):\n if invite_num < 3 and invite_num > 0:\n invite_money = 6\n elif invite_num < 6 and invite_num > 2:\n invite_money = 3.60\n elif invite_num < 21 and invite_num > 5:\n invite_money = 4.75\n elif invite_num < 101 and invite_num > 20:\n invite_money = 5.60\n elif invite_num < 501 and invite_num > 100:\n invite_money = 6.00\n elif invite_num < 1001 and invite_num > 500:\n invite_money = 3.25\n elif invite_num < 2001 and invite_num > 1000:\n invite_money = 3.5\n elif invite_num < 4001 and invite_num > 2000:\n invite_money = 3.75\n else:\n invite_money = 3.00\n return invite_money * 100000\n\n\ndef user_collect():\n today = str(datetime.date.today())\n li_date = today.split('-')\n date = li_date[0] + '-' + li_date[1]\n table_name = date + '_user_collect'\n return table_name\n\n\ndef user_comment():\n today = str(datetime.date.today())\n li_date = today.split('-')\n date = li_date[0] + '-' + li_date[1]\n table_name = date + '_user_comment'\n return table_name\n\ndef user_history():\n today = str(datetime.date.today())\n li_date = today.split('-')\n date = li_date[0] + '-' + li_date[1]\n table_name = date + '_user_history'\n return table_name\n\ndef user_sign():\n today = str(datetime.date.today())\n li_date = today.split('-')\n date = li_date[0] + '-' + li_date[1]\n table_name = date + '_user_sign'\n return table_name\n\ndef withdraw():\n today = str(datetime.date.today())\n li_date = today.split('-')\n date = li_date[0] + '-' + li_date[1]\n table_name = date + '_withdraw'\n return table_name\n\nTABLE_USER_COLLECT = user_collect() # user favorites table\nTABLE_USER_COMMENT = user_comment() # user comments table\nTABLE_USER_HISTORY = user_history() # user browsing-history table\nTABLE_USER_SIGN = user_sign() # user cumulative sign-in table\nTABLE_WITHDRAW = withdraw() # user withdrawal records table\n\n\n\n# yesterday and today --- dates\ndef sign_date():\n now = datetime.datetime.now()\n today = datetime.date.today()\n yesterday = today - datetime.timedelta(days=1)\n return today, yesterday, now\n\n\n# date formatting\ndef str_date(date):\n li_date = str(date).split('-')\n d = ''\n for date in li_date:\n d += date\n return d\n\n\nimport socket\nimport uuid\n\ndef get_mac_address():\n mac=uuid.UUID(int = uuid.getnode()).hex[-12:]\n return \":\".join([mac[e:e+2] for e in range(0,11,2)])\n\ndef get_host_ip():\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect(('8.8.8.8', 80))\n ip = s.getsockname()[0]\n finally:\n s.close()\n return ip\n\n# password hashing\ndef make_encrypt(pwd):\n md5_pwd = hashlib.md5()\n md5_pwd.update(pwd.encode())\n result = md5_pwd.hexdigest()\n return result\n\n# convert a timestamp to a datetime string\ndef time_change_date(times):\n print(times)\n timeArray = time.localtime(times)\n otherStyleTime = time.strftime(\"%Y-%m-%d %H:%M:%S\", timeArray)\n return otherStyleTime\n\nif __name__ == '__main__':\n md5_phone = make_encrypt(\"18237157681\")\n print(md5_phone)","sub_path":"t1_tornado/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":7392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"539808152","text":"#!/usr/bin/env python2\nimport os.path\nimport shutil\npathname = 
'/home/hwj/openface/100classes'\nexts = [\"jpg\",\"png\"]\nval = []\nclasses = set()\nfor subdir ,dirs,files in os.walk(pathname):\n for fname in files:\n (imageclass,imagename) = (os.path.basename(subdir),fname)\n if any(imagename.lower().endswith(\".\" +ext) for ext in exts):\n if imageclass not in classes:\n classes.add(imageclass)\n val.append((imageclass,imagename))\n \n\n\nnewdir = os.path.join(pathname,'new')\nos.makedirs(newdir)\ni =0\nfor person,image in val[1:]:\n i+=1\n originalpath = os.path.join(pathname,person,image)\n newpath = os.path.join(pathname,'new',str(i))\n shutil.move(originalpath,newpath)\n\n \nwith open((os.path.join(pathname,'name.txt')),'w') as t:\n for imageclass,imagename in val:\n t.writelines(imagename+' '+imageclass+'\\n')","sub_path":"testfor recognization.py","file_name":"testfor recognization.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"227297873","text":"\"\"\"In Lab 07 we wrote a program to calculate energy bill for residential and business customers.\nWe are going to rewrite that program with value returning functions.\nResidential customers pay $0.12 per kWh for the first 500 kWh. After the first 500 kWh, the rate is $0.15 per kWh.\nBusiness customers pay $0.16 per kWh for the first 800 kWh. After the first 800 kWh, the rate is $0.20 per kWh.\nWrite a program to calculate energy charge. You must write and use the following functions.\n\n• A main function: Call the value returning function get_user_input, which returns kWh used and customer type.\nPass the return values to the value returning function bill_calculator as two arguments.\nDisplay the return value of bill_calculator.\n\n• A get_user_input function: This function has no parameter. It asks the user to enter number of kWh used.\nUse an input validation loop to ensure that kWh used is not negative.\nAlso ask the user to enter customer type (enter R for residential or B for business).\nConvert lowercase letter to uppercase. 
Use an input validation loop to ensure that customer is either R or B.\nReturn kWh used and customer type.\n\n• A bill_calculator function: This function has two parameters to receive number of kWh used and customer type.\nCalculate and return the energy charge.\n\nEXAMPLE\nEnter kilowatt hours used: 810\nEnter R for residential customer, B for business customer: R\nPlease pay this amount: 106.50\n\nThe following is another example.\n\nEnter kilowatt hours used: 810\nEnter R for residential customer, B for business customer: b\nPlease pay this amount: 130.00\n\n\"\"\"\ndef main():\n print(\"Self Service for Business and Residential customers\")\n power,customer_type=get_user_input() #to get the variables required, we must use the dual-returning function\n #print(f\"You used {power} hours as a {customer_type} customer.\") #debug for input (works!)\n print(f\"Please pay this amount: ${bill_calculator(power,customer_type):.2f}\")\n\ndef get_user_input():\n\n kwh = float(input(\"Enter number of kWh used: \"))\n #while Loop until positive\n while kwh < 0:\n print(\"kWh cannot be negative.\")\n kwh = float(input(\"Enter number of kWh used: \"))\n\n customer_type = input(\"Enter R for residential customer, B for business customer: \")\n #while Loop until r or b\n while customer_type.upper() != \"R\" and customer_type.upper() != \"B\":\n print(\"This is only for [R]esidential and [B]usiness customers.\")\n customer_type = input(\"Enter R for residential customer, B for business customer: \")\n return kwh, customer_type\n\ndef bill_calculator(power, customer_type):\n if customer_type.upper() == \"B\":\n if power > 800:\n charge = ((power - 800) * 0.20) + (800 * 0.16) # 0.16 for first 800. 0.20 for others\n else:\n charge = power * 0.16\n else: #must be a residential customer\n if power > 500:\n charge = ((power - 500) * 0.15) + (500 * 0.12) #0.12 for first 500. 
0.15 for others\n else:\n charge = power * 0.12\n return charge\n\n\nmain()","sub_path":"DOCKERY-Lab08p02-CSC121.py","file_name":"DOCKERY-Lab08p02-CSC121.py","file_ext":"py","file_size_in_byte":3069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"352014007","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom flask import Blueprint, jsonify, request\nfrom .models import Pet, User, Question, Vote\nfrom sqlalchemy import exc\nfrom .exceptions import *\n\nroutes = Blueprint('routes', __name__)\n\n\n# ********************** ROUTES **********************\n\n@routes.route(\"hello\", strict_slashes=False)\ndef say_hello():\n return jsonify(\"Hello fellow PO's\")\n\n@routes.route(\"users\", strict_slashes=False, methods=['GET'])\ndef get_all_users():\n return jsonify(User.query.all())\n\n@routes.route(\"users/<user_id>\", strict_slashes=False, methods=['GET'])\ndef get_user(user_id):\n user = User.query.get(user_id)\n return jsonify(user), 200\n\n@routes.route(\"users\", strict_slashes=False, methods=['POST'])\ndef add_user():\n payload = request.get_json()\n try:\n if \"email\" not in payload :\n raise MissingFieldError(\"email\")\n if \"username\" not in payload :\n raise MissingFieldError(\"username\")\n except KeyError as e:\n raise MissingFieldError(e.args[0])\n\n new_user = User(username = payload[\"username\"], email = payload[\"email\"], job = payload[\"job\"])\n\n try :\n new_user.save()\n except exc.IntegrityError as e:\n raise DuplicationError(e.args[0])\n\n return jsonify(new_user), 201\n\n# Create new question\n@routes.route(\"questions\", strict_slashes=False, methods=['POST'])\ndef add_question():\n payload = request.get_json()\n try:\n if \"content\" not in payload :\n raise MissingFieldError(\"content\")\n except KeyError as e:\n raise MissingFieldError(e.args[0])\n\n new_question = Question(content=payload[\"content\"], chapter_id=payload[\"chapter_id\"])\n try:\n new_question.save()\n except exc.IntegrityError as e:\n raise DuplicationError(e.args[0])\n\n return jsonify(new_question), 201\n\n# Get all questions\n@routes.route(\"questions\", strict_slashes=False, methods=['GET'])\ndef get_all_questions():\n return jsonify(Question.query.all())\n\n# Create a vote\n@routes.route(\"votes\", strict_slashes=False, methods=['POST'])\ndef add_vote():\n payload = request.get_json()\n\n try:\n if \"question_id\" not in payload :\n raise MissingFieldError(\"question_id\")\n if \"user_id\" not in payload :\n raise MissingFieldError(\"user_id\")\n except KeyError as e:\n raise MissingFieldError(e.args[0])\n\n question_id=payload[\"question_id\"]\n user_id=payload[\"user_id\"]\n print(question_id)\n print(user_id)\n\n try:\n new_vote = Vote(question_id=question_id, user_id=user_id)\n except KeyError as e:\n raise MissingFieldError(e.args[0])\n\n\n voted_question = Question.query.get(question_id)\n if voted_question is None:\n raise NotFound(question_id)\n voting_user = User.query.get(user_id)\n if voting_user is None:\n raise NotFound(user_id)\n\n try:\n voted_question.votes.append(new_vote)\n voted_question.save()\n except exc.IntegrityError as e:\n raise DuplicationError(e.args[0])\n\n voted_question = Question.query.get(question_id)\n\n return jsonify(voted_question), 201\n\n\n\n# Update question\n# @routes.route(\"questions/<question_id>\", strict_slashes=False, methods=['PUT'])\n# def update_question(question_id):\n# payload = request.get_json()\n#\n# question = Question.query.filter_by(id=question_id).first()\n#\n# try:\n# updated_question = 
question.update(payload)\n# except AttributeError as e:\n# raise NotFound(\"question {id}\".format(id=question_id))\n#\n# return jsonify(updated_question), 200\n","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":3566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"263849132","text":"from typing import List\n\n\ndef append_empty_rows(rows: List[str], row_width, n_rows_required: int) -> List[str]:\n while len(rows) < n_rows_required:\n rows.append(' ' * row_width)\n\n return rows\n\ndef ensure_row_width(rows: List[str], width: int) -> List[str]:\n output = []\n for row in rows:\n actual_width = len(row)\n assert(actual_width <= width)\n\n buffer = (width - actual_width)\n output.append(\n row + (' ' * buffer)\n )\n\n return output\n","sub_path":"src/crazy_joe/utils/list_utils.py","file_name":"list_utils.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"529009042","text":"#https://www.tutorialspoint.com/python_data_structure/python_graphs.htm\nclass graph:\n def __init__(self,gdict=None):\n if gdict is None:\n gdict = {}\n self.gdict = gdict\n \n def getvertices(self):\n return list(self.gdict.keys())\n\n def getedges(self):\n edgename = []\n for edges in self.gdict:\n for edge in self.gdict[edges]:\n if {edge,edges} not in edgename:\n edgename.append({edge,edges})\n return edgename\n\n def addvertix(self,node):\n if node not in self.gdict:\n self.gdict[node] = []\n\n def addedge(self,edge):\n edge = set(edge)\n (v1,v2) = tuple(edge)\n if v1 in self.gdict:\n self.gdict[v1].append(v2)\n else:\n self.gdict[v1] = [v2]\n\n\n def findedges(self):\n edgename = []\n for edges in self.gdict:\n for edge in self.gdict[edges]:\n if {edge,edges} not in edgename:\n edgename.append({edge,edges})\n return edgename\n\n\ngraph_elements = { \"a\" : [\"b\",\"c\"],\n \"b\" : [\"a\", \"d\"],\n \"c\" : [\"a\", \"d\"],\n \"d\" : [\"e\"],\n \"e\" : [\"d\"]\n }\n\n\n\ng = graph(graph_elements)\n#g.addedge({'a','g'})\nprint(g.findedges())\n#print(g.getedges())","sub_path":"Leetcode/graph_sample.py","file_name":"graph_sample.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"243458520","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Bonjour! Yeah!\n\"\"\"\n\nimport logging\nimport os\nfrom telegram.ext import (Updater, CommandHandler, MessageHandler, Filters,\n InlineQueryHandler)\nfrom telegram.ext.dispatcher import run_async\nfrom telegram import InlineQueryResultArticle, InputTextMessageContent\nfrom telegram.error import (TelegramError, Unauthorized, BadRequest,\n TimedOut, ChatMigrated, NetworkError)\n\n#Log issues\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)\n\n#Authentication token obtained through botfather\nToken = os.environ['TOKEN']\n\n#Initialize and link updater (input) and dispatcher (output)\nupdater = Updater(token=Token)\ndispatcher = updater.dispatcher\n\n#Function to activate on sending /start, a message, /caps... 
to the bot\ndef start(bot, update):\n bot.send_message(chat_id=update.message.chat_id,\n text=\"I'm a bot, please talk to me!\")\n\n\ndef echo(bot, update):\n bot.send_message(chat_id=update.message.chat_id,\n text=update.message.from_user.first_name)\n\n\ndef caps(bot, update, args):\n text_caps = ' '.join(args).upper()\n bot.send_message(chat_id=update.message.chat_id,\n text=text_caps)\n\n\ndef unknown(bot, update):\n bot.send_message(chat_id=update.message.chat_id,\n text=\"Sorry, I didn't understand that command.\")\n\n\n\n# def error_callback(bot, update, error):\n# try:\n# raise error\n# except Unauthorized:\n# print(\"allo\")\n# # remove update.message.chat_id from conversation list\n# except BadRequest:\n# print(\"allo\")\n# # handle malformed requests - read more below!\n# except TimedOut:\n# print(\"allo\")\n# # handle slow connection problems\n# except NetworkError:\n# print(\"allo\")\n# # handle other connection problems\n# except ChatMigrated as e:\n# print(\"allo\")\n# # the chat_id of a group has changed, use e.new_chat_id instead\n# except TelegramError:\n# print(\"allo\")\n# # handle all other telegram related errors\n#\n# dispatcher.add_error_handler(error_callback)\n\n# from functools import wraps\n# from telegram import ChatAction\n#\n# def send_action(action):\n# \"\"\"Sends `action` while processing func command.\"\"\"\n#\n# def decorator(func):\n# @wraps(func)\n# def command_func(*args, **kwargs):\n# bot, update = args\n# bot.send_chat_action(chat_id=update.message.chat_id, action=action)\n# func(bot, update, **kwargs)\n# return command_func\n#\n# return decorator\n\n#Add handlers (/start, message) to the dispatcher. Allows passing arguments for /caps\nstart_handler = CommandHandler('start', start)\necho_handler = MessageHandler(Filters.text, echo)\ncaps_handler = CommandHandler('caps', caps, pass_args=True)\nunknown_handler = MessageHandler(Filters.command, unknown)\n\ndispatcher.add_handler(start_handler)\ndispatcher.add_handler(echo_handler)\ndispatcher.add_handler(caps_handler)\ndispatcher.add_handler(unknown_handler)\n\nif __name__ == \"__main__\":\n #Activate bot\n updater.start_polling()\n","sub_path":"Bot/bot_test.py","file_name":"bot_test.py","file_ext":"py","file_size_in_byte":3230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"245200877","text":"''' 3. Write a program that reads 20 integers and stores them in a vector. Store the\neven numbers in the vector PAR and the ODD numbers in the vector impar. 
Print the three vectors.'''\n\nimport random\nmestre = []\npar = []\nimpar = []\ni = 0\nprint('Generating random values with RANDOM...............')\nwhile i < 20:\n \n #n = int(input('Enter a value: '))\n #mestre.append(n)\n\n #Generating random values from 1 to 100 with RANDOM\n mestre.append(random.randint(1,100))\n\n if mestre[i] % 2 == 0:\n par.append(mestre[i])\n else:\n impar.append(mestre[i])\n i = i + 1\nprint('The 20 values entered are: ',mestre)\nprint('The even values are: ',par)\nprint('The odd values are: ',impar)\n","sub_path":"Lista de Exercícios - Arrays/ex03.py","file_name":"ex03.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"78972125","text":"from grafo import Grafo\n\n\ndef get_sets(grafo):\n\ts1 = [0]\n\ts2 = []\n\tt = [False] * grafo.vertices\n\tt[0] = True\n\t\n\ti = 0\n\twhile(False in t):\n\t\tvz = grafo.vizinhos(i)\n\t\tif (i in s1):\n\t\t\tfor v in vz:\n\t\t\t\tif (not t[v]):\n\t\t\t\t\ts2.append(v)\n\t\t\t\t\tt[v] = True\n\t\telif (i in s2):\n\t\t\tfor v in vz:\n\t\t\t\tif (not t[v]):\n\t\t\t\t\ts1.append(v)\n\t\t\t\t\tt[v] = True\n\t\t\n\t\ti = (i + 1) % grafo.vertices\n\treturn s1, s2 \n\ndef BFS(grafo, x, mate, d):\n\tq = []\n\tfor v in x:\n\t\tif (mate[v] == -1):\n\t\t\td[v] = 0\n\t\t\tq.insert(0, v)\n\t\telse:\n\t\t\td[v] = grafo.inf\n\t\n\td[-1] = grafo.inf\n\t\n\twhile (q):\n\t\tv = q.pop()\n\t\tif (d[v] < d[-1]):\n\t\t\tvizinhos = grafo.vizinhos(v)\n\t\t\tfor vizinho in vizinhos:\n\t\t\t\tif (d[mate[vizinho]] == grafo.inf):\n\t\t\t\t\td[mate[vizinho]] = d[v] + 1\n\t\t\t\t\tq.insert(0, mate[vizinho])\n\t\n\treturn d[-1] != grafo.inf, d\n\ndef DFS(grafo, mate, xx, d):\n\tif xx != -1:\n\t\tvizinhos = grafo.vizinhos(xx)\n\t\tfor y in vizinhos:\n\t\t\tif (d[mate[y]] == d[xx] + 1):\n\t\t\t\tvalidez, d, mate2 = DFS(grafo, mate, mate[y], d)\n\t\t\t\tif (validez):\n\t\t\t\t\tmate = mate2.copy()\n\t\t\t\t\tmate[y] = xx\n\t\t\t\t\tmate[xx] = y\n\t\t\t\t\treturn True, d, mate\n\t\td[xx] = grafo.inf\n\t\treturn False, d, mate\n\treturn True, d, mate\n\ndef hopcroft_karp_resultado(resultado):\n\tm, mate, x = resultado\n\n\tprint(\"value: %d\\n\" % m)\n\tprint(\"edges: \")\n\tfor v in x:\n\t\tprint(\"%d - %d;\" % (v + 1, mate[v] + 1), end = \" \")\n\tprint()\n\ndef hopcroft_karp(grafo):\n\td = [grafo.inf] * (grafo.vertices + 1)\n\tmate = [-1] * (grafo.vertices)\n\t\n\tx, y = get_sets(grafo)\n\tm = 0\n\n\twhile(True):\n\t\tvalidez, d = BFS(grafo, x, mate, d)\n\t\tif (not validez):\n\t\t\tbreak\n\t\t\n\t\tfor xx in x:\n\t\t\tif (mate[xx] == -1):\n\t\t\t\tvalidez, d, mate2 = DFS(grafo, mate, xx, d)\n\t\t\t\tif (validez):\n\t\t\t\t\tmate = mate2.copy()\n\t\t\t\t\tm += 1\n\treturn m, mate, x\n\nif __name__ == \"__main__\":\n\tgrafo = Grafo(\"../entradas/emparelhamento_maximo/gr512_30.gr\")\n\thopcroft_karp_resultado(hopcroft_karp(grafo))","sub_path":"algoritmos/hopcroft_karp.py","file_name":"hopcroft_karp.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"516994861","text":"#!/usr/bin/env python\n\"\"\"\nExample: simple line plot.\nShow how to make and save a simple line plot with labels, title and grid\n\"\"\"\nimport numpy\nimport pylab\n\nrho = numpy.arange(0.0, 1.0+0.001, 0.001)\ne = numpy.arccos(rho) / 3.14\n\n\nn=2\n\nPr_E = 1 - pow((1-e),n-1)\n\n\n\n#k = 2\nt1 = pow((1-e), (n-1))\nt2 = 1-pow(1-e,n-1)\n\nPu_k2 = pow(t1, 2) + pow(t2, 2)\nPr_k2 = (1.0/Pu_k2) * 
pow(t2, 2)\n\n\n#k = 3\nt1 = pow((1-e), (n-1))\nt2 = 1-pow(1-e,n-1)\n\nPu_k3 = pow(t1, 3) + 3*t1*pow(t2, 2)\nPr_k3 = (1.0/Pu_k3) * (2 * pow((1-e), n-1) * pow((1-pow(1-e,n-1)), 2))\n\n\n#k = 4\nt1 = pow((1-e), (n-1))\nt2 = 1-pow(1-e,n-1)\n\nPu_k4 = pow(t1, 4) + 6*pow(t1, 2)*pow(t2, 2) + pow(t2, 4)\nPr_k4 = (1.0/Pu_k4) * (3*pow(t1, 2)*pow(t2, 2) + pow(t2, 4))\n\n\n\npylab.plot (e, Pr_E, 'k--', e, Pr_k2, 'r-', e, Pr_k3, 'g-', e, Pr_k4, 'b-')\n\n\npylab.xlabel(\"'e' : arccos(overlap)/pi\\n[Dotted : Simple Attack, Red : k=2, Green : k=3, Blue : k=4]\")\npylab.ylabel('Pr')\npylab.title('** Probability of repulsive steps **')\npylab.grid(True)\npylab.savefig('Pr_for_diff_k_m2_plot')\n\npylab.show()\n","sub_path":"plots/Pa_Pr_plots/plotter_pr_vs_k_m2.py","file_name":"plotter_pr_vs_k_m2.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"464751875","text":"import xml.etree.cElementTree as ET\n\nfrom data_interface.data_types.FinancialClasses import AccountsManager\n\n\nclass XmlFileInterface(object):\n FILE_EXTENSION = 'xml'\n\n def __init__(self, account_manager):\n self.accounts_manager = account_manager\n\n def get_filepath(self, filename=None):\n if filename is None:\n filename = self.accounts_manager.name\n filename = filename + \".\" + XmlFileInterface.FILE_EXTENSION\n return filename\n\n def export_user_data(self, filename=None):\n root = ET.Element(\"root\")\n manager = ET.SubElement(root, \"manager\")\n manager.set('name', self.accounts_manager.name)\n\n for account in self.accounts_manager.accounts:\n account_xml = ET.SubElement(manager, 'account')\n account_xml.set('name', account.account_name)\n account_xml.set('total', str(account.current_total))\n\n for transaction in account.transactions:\n transaction_xml = ET.SubElement(account_xml, 'transaction')\n transaction_xml.set('transaction_type', str(transaction.transaction_type))\n transaction_xml.set('amount', str(transaction.amount))\n transaction_xml.set('origin', transaction.origin)\n transaction_xml.set('date', transaction.start_date)\n\n tree = ET.ElementTree(root)\n tree.write(self.get_filepath(filename))\n\n def import_user_data(self, filename=None):\n\n tree = ET.parse(filename)\n root = tree.getroot()\n for manager_xml in root:\n manager_name = manager_xml.attrib['name']\n self.accounts_manager = AccountsManager(manager_name)\n for account_xml in manager_xml:\n account_name = account_xml.attrib['name']\n account_obj = self.accounts_manager.add_account(account_name)\n for transaction_xml in account_xml:\n transaction_type = int(transaction_xml.attrib['transaction_type'])\n amount = int(transaction_xml.attrib['amount'])\n origin = transaction_xml.attrib['origin']\n date = transaction_xml.attrib['date']\n\n account_obj.add_transaction(transaction_type, amount, origin, date)","sub_path":"data_interface/xml_interface.py","file_name":"xml_interface.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"515107541","text":"import sqlite3\r\n#connecting to sqlite\r\nconn = sqlite3.connect('data.sqlite')\r\ncur = conn.cursor()\r\n\r\nstudents_list = list()\r\ncur.execute('SELECT * FROM a_batch')\r\nrows = cur.fetchall()\r\ni = 0\r\nfor row in rows:\r\n row_list = list(row)\r\n students_list.append(row_list)\r\n students_list[i].append(\"a\")\r\n i = i + 1\r\n\r\ncur.execute('SELECT * FROM b_batch')\r\nrows = cur.fetchall()\r\ni = 50\r\nfor row in 
rows:\r\n row_list = list(row)\r\n students_list.append(row_list)\r\n students_list[i].append(\"b\")\r\n i = i + 1\r\n\r\n\r\nprint(\"Enter the attendance list:\")\r\n\r\n#inputting students present\r\ninputs = []\r\nwhile True:\r\n inp = input()\r\n if inp == \"\":\r\n break\r\n inputs.append(inp)\r\n\r\n#common students\r\n\r\ncommon_students = []\r\nfor i in students_list :\r\n for j in inputs :\r\n if( i[1] == j) :\r\n x =str(i[0]) + \" \" + i[1]\r\n common_students.append(x)\r\n\r\n\r\n#students present in A batch\r\nstudents_a = []\r\nfor i in students_list :\r\n if(i[2]== 'a') :\r\n x = str(i[0]) + \" \" + i[1]\r\n students_a.append(x)\r\n\r\n\r\nprint(\"LIST OF ABSENTEES IN A BATCH\")\r\n# absentees in A batch\r\nabsentees_a = list(set(students_a) - set(common_students))\r\ncount_a =len(absentees_a)\r\nf = open(\"aba.txt\", \"a\")\r\nf.truncate(0)\r\nfor i in absentees_a :\r\n f.write(i)\r\n f.write(\"\\n\")\r\n print(i)\r\nprint(\"Number of students absent in A batch:\"+str(count_a))\r\nf.close()\r\n\r\n\r\n# students present in B batch\r\nstudents_b = []\r\nfor i in students_list:\r\n if (i[2] == 'b'):\r\n x = str(i[0]) + \" \" + i[1]\r\n students_b.append(x)\r\n\r\nprint(\"\\nLIST OF ABSENTEES IN B BATCH\")\r\n#absentees in B batch\r\n\r\nabsentees_b = list(set(students_b) - set(common_students))\r\ncount_b = len(absentees_b)\r\nf = open(\"abb.txt\", \"a\")\r\nf.truncate(0)\r\n\r\nfor i in absentees_b:\r\n\r\n f.write(i)\r\n f.write(\"\\n\")\r\n print(i)\r\nprint(\"Number of students absent in B batch:\"+str(count_b))\r\nf.close()\r\n\r\n\r\n#unidentified students\r\nunknown_list = []\r\nfor i in students_list :\r\n for j in inputs :\r\n if( i[1] == j) :\r\n unknown_list.append(j)\r\n\r\nprint(\"\\nLIST OF UNIDENTIFIED STUDENTS\")\r\n#unknown students list\r\nunknown_students = list(set(inputs) - set(unknown_list))\r\nf = open(\"stun.txt\", \"a\")\r\nf.truncate(0)\r\n\r\nfor i in unknown_students :\r\n f.write(i)\r\n f.write(\"\\n\")\r\n print(i)\r\nf.close()\r\nprint(\"Total number of Absentees:\"+str(count_a+count_b))","sub_path":"attendance checker/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"91097372","text":"import json\nclass CountElementJSON:\n def FindElement(id,id_name,file_name):\n counter = 0\n data = [json.loads(line) for line in open(file_name, 'r')]\n for find_id in range(len(data)):\n if data[find_id][id_name] in id:\n counter+=1\n print('Found elements \"'+id_name+'\" in quantity: '+ str(counter))\n return counter\n\nobj = CountElementJSON();\nobj = CountElementJSON.FindElement([1],\"user\",'file.json')\n","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"482348844","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport shutil\nimport tensorflow as tf\nimport tensorflow.contrib.crf as crf\n\nfrom tensorflow.contrib import lookup\nfrom config import FLAGS\nfrom utils.data_utils import DataUtils\nfrom utils.tensorflow_utils import TensorflowUtils\nfrom model import SequenceLabelingModel\n\n\nclass Predict(object):\n\n def __init__(self):\n self.vocab_path = FLAGS.vocab_path\n self.checkpoint_path = FLAGS.checkpoint_path\n self.freeze_graph_path = FLAGS.freeze_graph_path\n self.saved_model_path = FLAGS.saved_model_path\n\n self.use_crf = FLAGS.use_crf\n self.num_steps = 
FLAGS.num_steps\n\n self.default_label = FLAGS.default_label\n self.default_score = FLAGS.default_predict_score\n\n self.data_utils = DataUtils()\n self.tensorflow_utils = TensorflowUtils()\n self.num_classes = self.data_utils.get_vocabulary_size(os.path.join(FLAGS.vocab_path, 'labels_vocab.txt'))\n self.sequence_labeling_model = SequenceLabelingModel()\n self.init_predict_graph()\n\n\n def init_predict_graph(self):\n \"\"\"\n init predict model graph\n :return:\n \"\"\"\n # split 1-D String dense Tensor to words SparseTensor\n self.input_sentences = tf.placeholder(dtype=tf.string, shape=[None], name='input_sentences')\n sparse_words = tf.string_split(self.input_sentences, delimiter=' ')\n\n # slice SparseTensor\n valid_indices = tf.less(sparse_words.indices, tf.constant([self.num_steps], dtype=tf.int64))\n valid_indices = tf.reshape(tf.split(valid_indices, [1, 1], axis=1)[1], [-1])\n valid_sparse_words = tf.sparse_retain(sparse_words, valid_indices)\n\n excess_indices = tf.greater_equal(sparse_words.indices, tf.constant([self.num_steps], dtype=tf.int64))\n excess_indices = tf.reshape(tf.split(excess_indices, [1, 1], axis=1)[1], [-1])\n excess_sparse_words = tf.sparse_retain(sparse_words, excess_indices)\n\n # compute sentences lengths\n int_values = tf.ones(shape=tf.shape(valid_sparse_words.values), dtype=tf.int64)\n int_valid_sparse_words = tf.SparseTensor(indices=valid_sparse_words.indices, values=int_values,\n dense_shape=valid_sparse_words.dense_shape)\n input_sentences_lengths = tf.sparse_reduce_sum(int_valid_sparse_words, axis=1)\n\n # sparse to dense\n default_padding_word = self.data_utils._START_VOCAB[0]\n words = tf.sparse_to_dense(sparse_indices=valid_sparse_words.indices,\n output_shape=[valid_sparse_words.dense_shape[0], self.num_steps],\n sparse_values=valid_sparse_words.values,\n default_value=default_padding_word)\n\n # dict words to ids\n with open(os.path.join(self.vocab_path, 'words_vocab.txt'), encoding='utf-8', mode='rt') as data_file:\n words_table_list = [line.strip() for line in data_file if line.strip()]\n words_table_tensor = tf.constant(words_table_list, dtype=tf.string)\n words_table = lookup.index_table_from_tensor(mapping=words_table_tensor, default_value=self.data_utils._START_VOCAB_ID[3])\n # words_table = lookup.index_table_from_file(os.path.join(vocab_path, 'words_vocab.txt'), default_value=3)\n words_ids = words_table.lookup(words)\n\n # blstm model predict\n with tf.variable_scope('model', reuse=None):\n logits = self.sequence_labeling_model.inference(words_ids, input_sentences_lengths, self.num_classes, is_training=False)\n\n if self.use_crf:\n logits = tf.reshape(logits, shape=[-1, self.num_steps, self.num_classes])\n transition_params = tf.get_variable(\"transitions\", [self.num_classes, self.num_classes])\n input_sentences_lengths = tf.to_int32(input_sentences_lengths)\n predict_labels_ids, sequence_scores = crf.crf_decode(logits, transition_params, input_sentences_lengths)\n predict_labels_ids = tf.to_int64(predict_labels_ids)\n sequence_scores = tf.reshape(sequence_scores, shape=[-1, 1])\n normalized_sequence_scores = self.tensorflow_utils.score_normalize(sequence_scores)\n predict_scores = tf.matmul(normalized_sequence_scores, tf.ones(shape=[1, self.num_steps], dtype=tf.float32))\n else:\n props = tf.nn.softmax(logits)\n max_prop_values, max_prop_indices = tf.nn.top_k(props, k=1)\n predict_labels_ids = tf.reshape(max_prop_indices, shape=[-1, self.num_steps])\n predict_labels_ids = tf.to_int64(predict_labels_ids)\n predict_scores = 
tf.reshape(max_prop_values, shape=[-1, self.num_steps])\n predict_scores = tf.as_string(predict_scores, precision=3)\n\n # dict ids to labels\n with open(os.path.join(self.vocab_path, 'labels_vocab.txt'), encoding='utf-8', mode='rt') as data_file:\n labels_table_list = [line.strip() for line in data_file if line.strip()]\n labels_table_tensor = tf.constant(labels_table_list, dtype=tf.string)\n labels_table = lookup.index_to_string_table_from_tensor(mapping=labels_table_tensor, default_value=self.default_label)\n # labels_table = lookup.index_to_string_table_from_file(os.path.join(vocab_path, 'labels_vocab.txt'), default_value='O')\n predict_labels = labels_table.lookup(predict_labels_ids)\n\n sparse_predict_labels = self.tensorflow_utils.sparse_concat(predict_labels, valid_sparse_words, excess_sparse_words, self.default_label)\n sparse_predict_scores = self.tensorflow_utils.sparse_concat(predict_scores, valid_sparse_words, excess_sparse_words, '0.0')\n\n self.format_predict_labels = self.tensorflow_utils.sparse_string_join(sparse_predict_labels, 'predict_labels')\n self.format_predict_scores = self.tensorflow_utils.sparse_string_join(sparse_predict_scores, 'predict_scores')\n\n saver = tf.train.Saver()\n tables_init_op = tf.tables_initializer()\n\n self.sess = tf.Session()\n self.sess.run(tables_init_op)\n ckpt = tf.train.get_checkpoint_state(self.checkpoint_path)\n if ckpt and ckpt.model_checkpoint_path:\n print('read model from {}'.format(ckpt.model_checkpoint_path))\n saver.restore(self.sess, ckpt.model_checkpoint_path)\n else:\n print('No checkpoint file found at %s' % self.checkpoint_path)\n return\n\n\n def predict(self, words_list):\n \"\"\"\n Predict labels, the operation of transfer words to ids is processed by tensorflow tensor\n Input words list\n :param words_list:\n :return:\n \"\"\"\n split_words_list = []\n map_split_indexes = []\n for index in range(len(words_list)):\n temp_words_list = self.data_utils.split_long_sentence(words_list[index], self.num_steps)\n map_split_indexes.append(list(range(len(split_words_list), len(split_words_list) + len(temp_words_list))))\n split_words_list.extend(temp_words_list)\n\n predict_labels, predict_scores = self.sess.run([self.format_predict_labels, self.format_predict_scores], feed_dict={self.input_sentences: split_words_list})\n predict_labels_str = [predict_label.decode('utf-8') for predict_label in predict_labels]\n predict_scores_str = [predict_score.decode('utf-8') for predict_score in predict_scores]\n\n merge_predict_labels_str = []\n merge_predict_scores_str = []\n for indexes in map_split_indexes:\n merge_predict_label_str = ' '.join([predict_labels_str[index] for index in indexes])\n merge_predict_labels_str.append(merge_predict_label_str)\n merge_predict_score_str = ' '.join([predict_scores_str[index] for index in indexes])\n merge_predict_scores_str.append(merge_predict_score_str)\n\n return merge_predict_labels_str, merge_predict_scores_str\n\n\n def file_predict(self, data_filename, predict_filename):\n \"\"\"\n Predict data_filename, save the predict result into predict_filename\n The label is split into single word, -B -M -E -S\n :param data_filename:\n :param predict_filename:\n :return:\n \"\"\"\n print('Predict file ' + data_filename)\n sentence_list = []\n words_list = []\n labels_list = []\n predict_labels_list = []\n with open(data_filename, encoding='utf-8', mode='rt') as data_file:\n for line in data_file:\n words, labels = self.data_utils.split(line)\n if words and labels:\n sentence_list.append(''.join(words))\n 
words_list.append(' '.join(words))\n labels_list.append(' '.join(labels))\n predict_labels, _ = self.predict([' '.join(words)])\n predict_labels_list.append(predict_labels[0])\n word_predict_label_list = []\n word_category_list = []\n word_predict_category_list = []\n for (words, labels, predict_labels) in zip(words_list, labels_list, predict_labels_list):\n word_list = words.split()\n label_list = labels.split()\n predict_label_list = predict_labels.split()\n word_predict_label = ' '.join([word + '/' + predict_label for (word, predict_label) in zip(word_list, predict_label_list)])\n word_predict_label_list.append(word_predict_label)\n # merge label\n merge_word_list, merge_label_list = self.data_utils.merge_label(word_list, label_list)\n word_category = ' '.join([word + '/' + label for (word, label) in zip(merge_word_list, merge_label_list) if label != self.default_label])\n word_category_list.append(word_category)\n # merge predict label\n merge_predict_word_list, merge_predict_label_list = self.data_utils.merge_label(word_list, predict_label_list)\n word_predict_category = ' '.join([predict_word + '/' + predict_label for (predict_word, predict_label) in\n zip(merge_predict_word_list, merge_predict_label_list) if predict_label != 'O'])\n word_predict_category_list.append(word_predict_category)\n with open(predict_filename, encoding='utf-8', mode='wt') as predict_file:\n for (sentence, word_predict_label, word_category, word_predict_category) in \\\n zip(sentence_list, word_predict_label_list, word_category_list, word_predict_category_list):\n predict_file.write('Passage: ' + sentence + '\\n')\n predict_file.write('SinglePredict: ' + word_predict_label + '\\n')\n predict_file.write('Merge: ' + word_category + '\\n')\n predict_file.write('MergePredict: ' + word_predict_category + '\\n\\n')\n\n\n def freeze_graph(self):\n \"\"\"\n Save graph into .pb file\n :return:\n \"\"\"\n graph = tf.graph_util.convert_variables_to_constants(self.sess, self.sess.graph_def, ['init_all_tables', 'predict_labels', 'predict_scores'])\n tf.train.write_graph(graph, self.freeze_graph_path, 'frozen_graph.pb', as_text=False)\n print('Successfully freeze model to %s' % self.freeze_graph_path)\n\n\n def saved_model_pb(self):\n \"\"\"\n Saved model into .ph and variables files, loading it by tensorflow serving,\n :return:\n \"\"\"\n saved_model_path = os.path.join(self.saved_model_path, '1')\n if os.path.exists(saved_model_path):\n shutil.rmtree(saved_model_path)\n builder = tf.saved_model.builder.SavedModelBuilder(saved_model_path)\n input_tensor_info = tf.saved_model.utils.build_tensor_info(self.input_sentences)\n output_labels_tensor_info = tf.saved_model.utils.build_tensor_info(self.format_predict_labels)\n output_scores_tensor_info = tf.saved_model.utils.build_tensor_info(self.format_predict_scores)\n prediction_signature = tf.saved_model.signature_def_utils.build_signature_def(\n inputs={'input_sentences': input_tensor_info},\n outputs={'predict_labels': output_labels_tensor_info, 'predict_scores': output_scores_tensor_info},\n method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME\n )\n legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')\n builder.add_meta_graph_and_variables(\n self.sess, [tf.saved_model.tag_constants.SERVING],\n signature_def_map={'predict_segment': prediction_signature},\n legacy_init_op=legacy_init_op\n )\n builder.save()\n print('Successfully exported model to %s' % saved_model_path)\n\n\ndef main(_):\n predict = Predict()\n\n # sentence = 
'张伟在6月16号会去一趟丹棱街中国移动营业厅'\n # sentence = ''.join(sentence.split())\n # words = ' '.join([char for char in sentence])\n # predict_labels, predict_scores = predict.predict([words, '你 好'])\n # print(predict_labels)\n # print(predict_scores)\n #\n # predict.freeze_graph()\n predict.saved_model_pb()\n\n # predict.file_predict(os.path.join(FLAGS.datasets_path, 'test.txt'), os.path.join(FLAGS.datasets_path, 'test_predict.txt'))\n\n\nif __name__ == '__main__':\n tf.app.run()\n","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":13307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"274792968","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport logging\n\nfrom osc_lib.command import command\nfrom osc_lib import exceptions\nfrom osc_lib.i18n import _\n\nfrom tripleoclient.workflows import roles\n\n\nclass ListRoles(command.Lister):\n \"\"\"List the current and available roles in a given plan\"\"\"\n\n log = logging.getLogger(__name__ + \".ListRoles\")\n\n def get_parser(self, prog_name):\n parser = super(ListRoles, self).get_parser(prog_name)\n parser.add_argument(\n '--name',\n dest='name',\n default='overcloud',\n help=_('The name of the plan, which is used for the object '\n 'storage container, workflow environment and orchestration '\n 'stack names.'),\n )\n parser.add_argument(\n '--detail',\n action='store_true',\n help=_('Include details about each role'))\n parser.add_argument(\n '--current',\n action='store_true',\n help=_('Only show the information for the roles currently enabled '\n 'for the plan.'))\n return parser\n\n def take_action(self, parsed_args):\n self.log.debug('take_action({})'.format(parsed_args))\n\n if parsed_args.current:\n result = roles.list_roles(\n self.app.client_manager.workflow_engine,\n container=parsed_args.name,\n detail=parsed_args.detail)\n else:\n result = roles.list_available_roles(\n self.app.client_manager,\n container=parsed_args.name)\n # The workflow returns all the details by default, trim\n # them down if not required.\n if not parsed_args.detail:\n result = [r['name'] for r in result]\n\n if parsed_args.detail:\n if result:\n result.sort(key=lambda r: r['name'])\n\n role_list = self.format_role_details(result)\n column_names = (\"Role Name\",\n \"Description\",\n \"Services Default\",\n \"Other Details\")\n return (column_names, role_list)\n else:\n if result:\n result.sort()\n return ((\"Role Name\",), [(r,) for r in result])\n\n def format_role_details(self, result):\n role_list = []\n for r in result:\n name = r.pop('name')\n description = service_defaults = ''\n detail = []\n\n if 'description' in r:\n description = r.pop('description')\n if 'ServicesDefault' in r:\n r['ServicesDefault'].sort()\n service_defaults = '\\n'.join(r.pop('ServicesDefault'))\n for k, v in r.items():\n detail.append(\"%s: %s\" % (k, v))\n\n role_list.append((name, description, service_defaults,\n '\\n'.join(detail)))\n return role_list\n\n\nclass ShowRole(command.ShowOne):\n 
\"\"\"Show details for a specific role, given a plan\"\"\"\n\n log = logging.getLogger(__name__ + \".ShowRole\")\n\n def get_parser(self, prog_name):\n parser = super(ShowRole, self).get_parser(prog_name)\n parser.add_argument(\n '--name',\n dest='name',\n default='overcloud',\n help=_('The name of the plan, which is used for the object '\n 'storage container, workflow environment and orchestration '\n 'stack names.'),\n )\n parser.add_argument('role',\n metavar=\"\",\n help=_('Name of the role to look up.'))\n return parser\n\n def take_action(self, parsed_args):\n self.log.debug('take_action({})'.format(parsed_args))\n\n role = self.get_role_details(parsed_args.name, parsed_args.role)\n if not role:\n raise exceptions.CommandError(\n \"Could not find role %s\" % parsed_args.role)\n\n return self.format_role(role)\n\n def get_role_details(self, name, role_name):\n result = roles.list_available_roles(\n self.app.client_manager,\n container=name)\n\n for r in result:\n if r['name'] == role_name:\n return r\n return []\n\n def format_role(self, role):\n column_names = ['Name']\n data = [role.pop('name')]\n\n if 'description' in role:\n column_names.append('Description')\n data.append(role.pop('description'))\n if 'ServicesDefault' in role:\n column_names.append('Services Default')\n role['ServicesDefault'].sort()\n data.append('\\n'.join(role.pop('ServicesDefault')))\n\n other_fields = list(role.keys())\n other_fields.sort()\n for field in other_fields:\n column_names.append(field)\n data.append(role[field])\n\n return column_names, data\n","sub_path":"tripleoclient/v1/overcloud_plan_roles.py","file_name":"overcloud_plan_roles.py","file_ext":"py","file_size_in_byte":5469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"471616199","text":"###################################\r\n# CS B551 Fall 2019, Assignment #3\r\n#\r\n# Your names and user ids: Viral Prajapati vkprajap\r\n#\r\n# (Based on skeleton code by D. 
Crandall)\r\n#\r\n\r\nimport random\r\nimport math\r\nimport copy\r\nimport numpy as np\r\nfrom collections import Counter\r\n\r\nclass Solver:\r\n \r\n# calculating the priors for all the POS tags or labels\r\n def probS(self, cntTags, tags):\r\n posTags = [key for key in cntTags]\r\n total = len(tags)\r\n PSi = {}\r\n for tag in posTags:\r\n Ps = cntTags[tag] / total\r\n PSi[tag] = Ps\r\n return PSi\r\n\r\n#calculating initial probability of all the first word of sentences in the data for viterbi approach\r\n def initProb(self, data):\r\n temp = []\r\n i = 0\r\n for i in range(len(data)):\r\n temp.append(data[i][1][0])\r\n cntTemp = Counter(temp)\r\n initProbs = []\r\n for k, v in cntTemp.items():\r\n initProbs.append((v/len(temp), k))\r\n return initProbs\r\n\r\n#calculating transition probability of all the labels from the data\r\n def transProb(self, tags, cntTags):\r\n posTags = [key for key in cntTags]\r\n tranProbTable = []\r\n for i in posTags:\r\n temp1 = []\r\n for j in range(len(posTags)):\r\n count = 0.00000001\r\n for k in range(len(tags)-1):\r\n if tags[k] == i and tags[k+1] == posTags[j]:\r\n count+=1\r\n temp1.append(((count/cntTags[i]),posTags[j]))\r\n tranProbTable.append(temp1)\r\n return tranProbTable\r\n\r\n#calculating emission probability of the words in the test data or given sentence\r\n def emissProb(self, sentence, words, tags, cntTags):\r\n posTags = [key for key in cntTags]\r\n emissProbTable = [] \r\n for i in range(len(posTags)):\r\n temp1 = []\r\n for j in range(len(sentence)):\r\n if i == 1:\r\n temp1.append(0.0001)\r\n else:\r\n temp1.append(0.00000001)\r\n #temp1.append(0.01)\r\n emissProbTable.append(temp1)\r\n \r\n for x in range(len(sentence)):\r\n for y in range(len(words)):\r\n if sentence[x] == words[y]:\r\n for z in range(len(posTags)):\r\n if tags[y] == posTags[z]:\r\n ind = z\r\n break\r\n emissProbTable[ind][x] = emissProbTable[ind][x] + 1\r\n \r\n for k in range(len(cntTags)):\r\n for l in range(len(emissProbTable[0])):\r\n emissProbTable[k][l] = float(emissProbTable[k][l] / cntTags[posTags[k]])\r\n \r\n return emissProbTable\r\n\r\n# Predicting tags using simplified bayes net\r\n def simpleModel(self, sentence, words, tags, cntTags, PSi, emissProbTable):\r\n tagsList = []\r\n prob = []\r\n tagList = [key for key in cntTags]\r\n for i in range(len(sentence)):\r\n probList = []\r\n for j in range(len(tagList)):\r\n temp = emissProbTable[j][i] * PSi[tagList[j]]\r\n probList.append(temp)\r\n tagsList.append(tagList[probList.index(max(probList))])\r\n prob.append(max(probList))\r\n \r\n try:\r\n temp1 = math.log(sum(prob))\r\n except:\r\n temp1 = 1\r\n \r\n return tagsList, temp1\r\n \r\n# Predicting tags using Viterbi approach by calculating MA\r\n#This Viterbi Algorithm has been referenced from https://en.wikipedia.org/wiki/Viterbi_algorithm \r\n def viterbiModel(self, sentence, words, tags, cntTags, initProbs, tranProbTable, emissProbTable):\r\n prob = []\r\n tagList = [key for key in cntTags]\r\n predictions = [{}]\r\n for i in range(len(tagList)):\r\n predictions[0][i] = {\"prob\": initProbs[i][0] * emissProbTable[i][0], \"prev\": None}\r\n for j in range(1, len(sentence)):\r\n predictions.append({})\r\n for k in range(len(tagList)):\r\n maxTranProb = predictions[j-1][0][\"prob\"]*tranProbTable[k][0][0]\r\n prevState = 0\r\n for l in range(len(tagList[1:])):\r\n tranProb = predictions[j-1][l][\"prob\"]*tranProbTable[l][k][0]\r\n if tranProb > maxTranProb:\r\n maxTranProb = tranProb\r\n prevState = l\r\n\r\n max_prob = maxTranProb * 
emissProbTable[k][j]\r\n predictions[j][k] = {\"prob\": max_prob, \"prev\": prevState}\r\n \r\n temp = []\r\n maxProb = max(value[\"prob\"] for value in predictions[-1].values())\r\n previous = None\r\n for state, proba in predictions[-1].items():\r\n if proba[\"prob\"] == maxProb:\r\n temp.append(state)\r\n previous = state\r\n break\r\n for x in range(len(predictions)-2, -1, -1):\r\n temp.insert(0, predictions[x + 1][previous][\"prev\"])\r\n prob.insert(0, predictions[x + 1][previous][\"prob\"])\r\n previous = predictions[x + 1][previous][\"prev\"]\r\n\r\n predResult = []\r\n for y in temp:\r\n predResult.append(tagList[y])\r\n \r\n try:\r\n temp1 = math.log(maxProb)\r\n except:\r\n temp1 = 1\r\n \r\n return predResult, temp1\r\n\r\n# Predicting tags using Gibbs sampling approach\r\n def gibbsModel(self, sentence, words, tags, cntTags, tranProbTable, PSi, emissProbTable):\r\n tagList = [key for key in cntTags]\r\n result = ['noun'] * len(sentence)\r\n resultTable = []\r\n for n in range(1000):\r\n finalProb = 0\r\n for i in range(len(sentence)):\r\n temp1 = []\r\n if i == 0:\r\n for j in range(len(tagList)):\r\n probability = emissProbTable[j][i] * PSi[tagList[j]]\r\n temp1.append(probability)\r\n elif i == len(sentence) - 1:\r\n for j in range(len(tagList)):\r\n k = tagList.index(result[i-1])\r\n l = tagList.index(result[0])\r\n probability = emissProbTable[j][i]*PSi[tagList[j]]*tranProbTable[k][j][0]*tranProbTable[l][j][0]\r\n temp1.append(probability)\r\n else:\r\n for j in range(len(tagList)):\r\n k = tagList.index(result[i-1])\r\n probability = emissProbTable[j][i]*PSi[tagList[j]]*tranProbTable[k][j][0]\r\n temp1.append(probability)\r\n rand = random.uniform(0,1)\r\n temp2 = 0\r\n ind = temp1.index(max(temp1))\r\n for z in range(len(temp1)):\r\n temp1[z] = temp1[z] / sum(temp1)\r\n temp2 += temp1[z]\r\n if rand <= temp2:\r\n ind = z\r\n break\r\n result[i] = tagList[ind]\r\n if n == 999:\r\n try:\r\n prob = math.log(probability)\r\n except:\r\n prob = 1\r\n finalProb += prob\r\n \r\n temp = copy.copy(result)\r\n resultTable.append(temp)\r\n\r\n resultTable = np.array(resultTable)\r\n resultTableT = resultTable.T\r\n predTags = []\r\n for i in range(len(resultTableT)):\r\n TagCount = Counter(resultTableT[i])\r\n predTags.append(TagCount.most_common(1)[0][0])\r\n\r\n return predTags, finalProb/3.2\r\n\r\n def posterior(self, model, sentence, label, probS, probH, probC):\r\n if model == \"Simple\":\r\n return probS\r\n elif model == \"Complex\":\r\n return probC\r\n elif model == \"HMM\":\r\n return probH\r\n else:\r\n print(\"Unknown algo!\")\r\n\r\n \r\n def train(self, data):\r\n tags = []\r\n words = []\r\n for i in data:\r\n for j in i[1]:\r\n tags.append(j)\r\n for k in i[0]:\r\n words.append(k)\r\n cntTags = Counter(tags)\r\n initProbs = self.initProb(data)\r\n tranProbTable = self.transProb(tags, cntTags)\r\n PSi = self.probS(cntTags, tags)\r\n return words, tags, cntTags, initProbs, tranProbTable, PSi\r\n\r\n def simplified(self, sentence, words, tags, cntTags, PSi, emissProbTable):\r\n results, prob = self.simpleModel(sentence, words, tags, cntTags, PSi, emissProbTable)\r\n return results, prob\r\n\r\n def complex_mcmc(self, sentence, words, tags, cntTags, tranProbTable, PSi, emissProbTable):\r\n results, prob = self.gibbsModel(sentence, words, tags, cntTags, tranProbTable, PSi, emissProbTable)\r\n return results, prob\r\n \r\n def hmm_viterbi(self, sentence, words, tags, cntTags, initProbs, tranProbTable, emissProbTable):\r\n results, prob = self.viterbiModel(sentence, 
words, tags, cntTags, initProbs, tranProbTable, emissProbTable)\r\n return results, prob\r\n\r\n\r\n def solve(self, model, sentence, words, tags, cntTags, initProbs, tranProbTable, PSi, emissProbTable):\r\n if model == \"Simple\":\r\n return self.simplified(sentence, words, tags, cntTags, PSi, emissProbTable)\r\n elif model == \"Complex\":\r\n return self.complex_mcmc(sentence, words, tags, cntTags, tranProbTable, PSi, emissProbTable)\r\n elif model == \"HMM\":\r\n return self.hmm_viterbi(sentence, words, tags, cntTags, initProbs, tranProbTable, emissProbTable)\r\n else:\r\n print(\"Unknown algo!\")\r\n","sub_path":"pos_solver.py","file_name":"pos_solver.py","file_ext":"py","file_size_in_byte":9702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"244593068","text":"# 定义一个 my_layer.py\nimport torch\n\n\nclass MyLayer(torch.nn.Module):\n '''\n 因为这个层实现的功能是:y=weights*sqrt(x2+bias),所以有两个参数:\n 权值矩阵weights\n 偏置矩阵bias\n 输入 x 的维度是(in_features,)\n 输出 y 的维度是(out_features,) 故而\n bias 的维度是(in_fearures,),注意这里为什么是in_features,而不是out_features,注意体会这里和Linear层的区别所在\n weights 的维度是(in_features, out_features)注意这里为什么是(in_features, out_features),而不是(out_features, in_features),注意体会这里和Linear层的区别所在\n '''\n def __init__(self, in_features, out_features, bias=True):\n super(MyLayer, self).__init__() # 和自定义模型一样,第一句话就是调用父类的构造函数\n self.in_features = in_features\n self.out_features = out_features\n self.weight = torch.nn.Parameter(\n torch.Tensor(in_features,\n out_features)) # 由于weights是可以训练的,所以使用Parameter来定义\n if bias:\n self.bias = torch.nn.Parameter(\n torch.Tensor(in_features)) # 由于bias是可以训练的,所以使用Parameter来定义\n else:\n self.register_parameter('bias', None)\n\n def forward(self, input):\n input_ = torch.pow(input, 2) + self.bias\n y = torch.matmul(input_, self.weight)\n return y\n","sub_path":"maker_brb/test/my_layer.py","file_name":"my_layer.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"177682896","text":"#!/usr/bin/python3\n\"\"\"Packs web_static into a tgz archive using Fabric\"\"\"\nfrom fabric.api import *\nimport os\nfrom datetime import datetime\n\nenv.hosts = ['35.196.185.161', '104.196.150.129']\n\n\n@runs_once\ndef do_pack():\n \"\"\"Packs what is inside web_static into a tgz using the name\n `web_static_.tgz`\"\"\"\n\n time = datetime.now()\n created_at = str(time.year) + str(time.month) + str(time.day) +\\\n str(time.hour) + str(time.minute) + str(time.second)\n tgz_path = \"versions/web_static_{}.tgz\".format(created_at)\n\n command = \"tar -cvzf {} web_static\".format(tgz_path)\n\n if not os.path.exists(\"versions\"):\n os.makedirs(\"versions\")\n print(\"Packing web_static to {}\".format(tgz_path))\n result = local(command)\n if result.succeeded:\n return tgz_path\n else:\n return None\n\n\ndef do_deploy(archive_path):\n \"\"\"Uploads archive at archive_path to remote servers, unpacks archive and\n creates a symbolic link\"\"\"\n if not os.path.isfile(archive_path):\n return False\n\n filename = archive_path.split('/')[-1]\n tmp_arc_path = \"/tmp/\" + filename\n data_arc_path = \"/data/web_static/releases/{}\"\\\n .format(filename.split('.')[0])\n\n put(archive_path, tmp_arc_path)\n commands = [\"mkdir -p {}/\".format(data_arc_path),\n \"tar -xzf {} -C {}/\".format(tmp_arc_path, data_arc_path),\n \"rm \" + tmp_arc_path,\n \"mv {}/web_static/* {}/\".format(data_arc_path, data_arc_path),\n \"rm -rf {}/web_static\".format(data_arc_path),\n \"rm -rf 
/data/web_static/current\",\n                \"ln -s {}/ /data/web_static/current\".format(data_arc_path)\n                ]\n\n    for command in commands:\n        r = run(command)\n        if r.failed:\n            return False\n\n    print(\"New version deployed!\")\n\n    return True\n\n\ndef deploy():\n    \"\"\"Performs do_pack() and do_deploy(archive_path)\"\"\"\n    arc_path = do_pack()\n    if arc_path is None:\n        return False\n\n    return do_deploy(arc_path)\n","sub_path":"3-deploy_web_static.py","file_name":"3-deploy_web_static.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} {"seq_id":"27336991","text":"#!/usr/bin/env python\n# encoding: utf-8\n'''\n@author: caroline\n@license: (C) Copyright 2019-2022, Node Supply Chain Manager Corporation Limited.\n@contact: caroline.fang.cc@gmail.com\n@software: pycharm\n@file: v1_chain_getCandidateAddrs.py\n@time: 2020/1/8 5:31 PM\n@desc:\n'''\n\nfrom app.src.API import request_Api\n\n'''14. chain_GetCandidateAddrs'''\n\n\ndef getCandidateAddrs(api_name, params):\n\t'''\n\tGet all candidate node addresses and their corresponding trust values\n\t:param api_name: chain_getCandidateAddrs\n\t:param params: address\n\t:return: []\n\tExample:\n\tcurl http://localhost:15645 -X POST --data '{\"jsonrpc\":\"2.0\",\"method\":\"chain_getCandidateAddrs\",\"params\":[\"\"], \"id\": 3}' -H \"Content-Type:application/json\"\n\t'''\n\t\n\ttry:\n\t\tresult = request_Api(api_name, params)\n\t\tprint(\"Fetched all candidate node addresses and their trust values: {}\".format(result))\n\t\treturn result\n\texcept Exception as e:\n\t\tprint(\"Failed to fetch candidate node addresses and trust values, the API returned an error: {}\".format(e))\n\t\treturn -1\n\n\nif __name__ == '__main__':\n\tapi_name = \"chain_getCandidateAddrs\"\n\tparams = [\"\"]\n\tgetCandidateAddrs(api_name, params)\n","sub_path":"app/src/chain_interface/chain_getCandidateAddrs.py","file_name":"chain_getCandidateAddrs.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} {"seq_id":"652345445","text":"import urllib.request, sys,base64,json,os,time,baiduSearch\r\nfrom PIL import Image\r\nfrom aip import AipOcr\r\nimport re\r\nfrom datetime import datetime\r\nimport config\r\n\r\nwhile True:\r\n    start = time.time()\r\n    timeStamp = str(int(datetime.now().timestamp()))\r\n    #print('---------------time-------------',timeStamp)\r\n    os.system(\"adb shell /system/bin/screencap -p /sdcard/screen.png\")\r\n    os.system(\"adb pull /sdcard/screen.png ./screen\"+timeStamp+\".png\")\r\n\r\n    \"\"\" (Baidu OCR) your APPID, AK, SK \"\"\"\r\n    _OCR = config.ocr()\r\n    APP_ID = _OCR.APP_ID\r\n    API_KEY = _OCR.API_KEY\r\n    SECRET_KEY = _OCR.SECRET_KEY\r\n    client = AipOcr(APP_ID, API_KEY, SECRET_KEY)\r\n\r\n    im = Image.open(r\"./screen\"+timeStamp+\".png\")\r\n\r\n    img_size = im.size\r\n    w = im.size[0]\r\n    h = im.size[1]\r\n    #print(\"xx:{}\".format(img_size))\r\n\r\n    region = im.crop((70,250, w-70,1300)) # region of the screen to crop\r\n    #print(region)\r\n    region.save(r\"./crop_test.png\")\r\n\r\n\r\n\r\n    \"\"\" read the image \"\"\"\r\n    def get_file_content(filePath):\r\n        with open(filePath, 'rb') as fp:\r\n            return fp.read()\r\n    image = get_file_content(r\"./crop_test.png\")\r\n    #print(image)\r\n    respon = client.basicGeneral(image)\r\n    titles = respon['words_result'] # the recognized question lines\r\n    #print(titles)\r\n    str_len = len(titles)\r\n    # ques = str_len > 0 and titles[0]['words']\r\n    answer1 = str_len > 1 and titles[str_len-3]['words']\r\n    answer2 = str_len > 2 and titles[str_len-2]['words']\r\n    answer3 = str_len > 3 and titles[str_len-1]['words']\r\n\r\n    
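# the last three OCR lines are taken as the candidate answers; when OCR\r\n    # returns fewer than four lines, some of answer1..answer3 will be False\r\n    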
print('该题目可选答案:',answer1,',',answer2,',',answer3)\r\n # ans = ans +ques\r\n ans = ''\r\n for title in titles:\r\n ans = ans +title['words']\r\n\r\n #print(ques) #打印问题\r\n\r\n print(ans.split('?')[0])\r\n\r\n keyword = ans.split('?')[0] #识别的问题文本\r\n\r\n convey = 'n'\r\n\r\n # results = baiduSearch.search(keyword)\r\n\r\n if convey == 'y' or convey == 'Y':\r\n results = baiduSearch.search(keyword, convey=True)\r\n elif convey == 'n' or convey == 'N' or not convey:\r\n results = baiduSearch.search(keyword)\r\n else:\r\n print('输入错误')\r\n exit(0)\r\n count = 0\r\n for result in results:\r\n \tprint('{0}'.format(result.abstract)\r\n .replace('答案','\\033[1;31;40m答案\\033[0m')\r\n .replace('专业','\\033[1;31;40m专业\\033[0m')\r\n .replace('最佳','\\033[1;31;40m最佳\\033[0m')\r\n .replace(answer1,'\\033[1;32;40m'+answer1+'\\033[0m')\r\n .replace(answer2,'\\033[1;32;40m'+answer2+'\\033[0m')\r\n .replace(answer3,'\\033[1;32;40m'+answer3+'\\033[0m')\r\n )\r\n \tcount=count+1\r\n \tif(count == 12): #这里限制了只显示2条结果,可以自己设置\r\n \t\tbreak\r\n\r\n end = time.time()\r\n print('程序用时:'+str(end-start)+'秒')\r\n\r\n go = input('输入回车继续运行,输入 n 回车结束运行: ')\r\n if go == 'n':\r\n break\r\n\r\n print('------------------------')","sub_path":"hero.py","file_name":"hero.py","file_ext":"py","file_size_in_byte":2957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"438359381","text":"weather_c = {\n \"Monday\": 12,\n \"Tuesday\": 14,\n \"Wednesday\": 15,\n \"Thursday\": 14,\n \"Friday\": 21,\n \"Saturday\": 22,\n \"Sunday\": 24,\n}\n# 🚨 Don't change code above 👆\n\n\n# Write your code 👇 below:\nweather_f = {day: temp_c * 9/5 + 32 for (day, temp_c) in weather_c.items()}\n\n\nprint(weather_f)\n\n# iterate over a pandas dataframe\n# student_dict = {\n# \"student\": [\"Angela\", \"James\", \"Lily\"],\n# \"score\": [56, 76, 98]\n# }\n#\n# import pandas\n#\n# student_data_frame = pandas.DataFrame(student_dict)\n# print(student_data_frame)\n#\n# for (index, row) in student_data_frame.iterrows():\n# print(row.student)","sub_path":"Day71/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"341705405","text":"\n'''This script datamines the reports by simple python operations to find smell related words in the reports and categorises by smell types.\n\nCan print categorised and uncategorised.\n\nOnly nltk sentence tokenizer is used.\n\nUses prettyprint to display the results nicely.\n'''\n\n\nfrom map import mapping\n# walk through the os and get all files\n# read each file in tern and go through line by line\n# print lines that contain smell and the report name\nfrom os import listdir\nimport nltk.data\nimport json\nfrom pprint import pprint as pp\nfrom collections import defaultdict\nimport dataset\n\nSMELL_WORDS = ['smell', 'stench', 'stink', 'odour', 'sniff', 'effluvium', 'effluvia']\nREPORTS_DIR = '/Users/deborah/Documents/scripts/python_work/project2016/Full Text Online'\n\n\nclass SmellType(object):\n\n def __init__(self, name, synonyms):\n self.name = name\n self.synonyms = synonyms\n\nclass Smell(object):\n\n def __init__(self, borough, category, sentence, year):\n self.borough = borough\n self.category = category\n self.sentence = sentence\n self.year = year\n def __repr__(self):\n return \"Smell(%s, %s, %s, %s)\" % (repr(self.borough), repr(self.category), repr(self.sentence), repr(self.year))\n def __eq__(self, other):\n return repr(self) == repr(other)\n\n# 
TEMPLATE: category = SmellType('category_name', ['synonym1', 'synonym2'])\n# TODO: Create smell categories here\nsewer = SmellType('sewer', ['sewer', 'drain', 'sewage', 'manhole', 'gully', 'cesspool'])\nwaste_rubbish = SmellType('waste_rubbish', ['refuse', 'waste', 'rubbish'])\nwaste_excrement = SmellType('waste_excrement', ['excrement', 'dung'])\nfood = SmellType('food', ['food', 'vegetable', 'cake', 'milk', 'butter', 'icing', 'preserve', 'cooking', 'veal', 'sausages', 'cow'])\ntrades = SmellType('trades', ['trade', 'glue', 'gum', 'fat', 'oil', 'fellmongers', 'manure manufacture', 'ferment', 'butchers', 'burning'])\nanimal = SmellType('animal', ['animal', 'piggeries', 'manure', 'excrement', 'cowhouse'])\nfactory_fuel = SmellType('factory_fuel', ['factory', 'rubber', 'naphtha', 'fuel', 'works'])\nschool = SmellType('school', ['school', 'lavatories', 'discharging ears'])\ngas = SmellType('gas', ['gas', 'carbonic acid', 'vapours', 'sulphide'])\ndecomposition = SmellType('decomposition', ['mortuary', 'dead', 'church', 'chapel'])\nno_smell = SmellType('no_smell', ['no offensive smell', 'no offensive odour', 'no disagreeable smell'])\n\ndef get_file_names():\n    '''Retrieve file names'''\n    fileNames = [f for f in listdir(REPORTS_DIR) if f.endswith('txt')]\n    return fileNames\n\n\ndef tokenize_to_sentence(sentence):\n    parser = nltk.data.load('tokenizers/punkt/english.pickle')\n    # split into sentences\n    result = parser.tokenize(sentence.strip())\n    return result\n\n\ndef saveObject(results):\n    '''Save results dictionary as file'''\n    with open('processed_results.txt', 'w') as outfile:\n        json.dump(results, outfile)\n\n\ndef get_categorised_results(results_list, categories):\n    for category in categories:\n        for result in results_list:\n            if result.category == category:\n                pp({category: {result.borough: {result.year:result.sentence}}})\n\ndef get_uncategorised_results(results_list):\n    for result in results_list:\n        pp({'uncategorised': {result.borough: {result.year:result.sentence}}})\n\ndef get_all_results(categorised_results, uncategorised_results):\n    results = categorised_results + uncategorised_results\n    for result in results:\n        pp({result.borough: {result.year:result.sentence}})\n\n\nclass SmellDataMine(object):\n\n    def __init__(self):\n        self.smellTypes = [sewer, waste_rubbish, waste_excrement, trades, school, gas, factory_fuel, decomposition, animal]\n        self.fileNames = get_file_names()\n        self.results = []\n        self.db = dataset.connect('sqlite:///database')\n        self.uncategorised = []\n\n    def save_to_database(self, results):\n        # create table\n        table = self.db['smells']\n        for result in results:\n            try:\n                table.insert({'Category': result.category,\n                              'Borough': result.borough,\n                              'Year': result.year,\n                              'Sentence': result.sentence})\n            except:\n                print(result)\n\n    def run(self):\n        for fileName in self.fileNames[:2000]:\n            self.process_file(fileName)\n\n    def getMeta(self, fileName):\n        splitReport = fileName.split('.')\n        bID = splitReport[2]\n        year = splitReport[1]\n        try:\n            region = mapping[bID]\n        except:\n            # TODO there is a problem with mappings e.g. Acton.1915.b19783905.txt. 
Region cannot be found\n print(fileName)\n return (None, None)\n return year, region\n\n def process_file(self, fileName):\n path = REPORTS_DIR + '/' + fileName\n # references = []\n year, region = self.getMeta(fileName)\n if not all([year, region]):\n return\n self.process_file_with_metadata(fileName, year, region)\n\n\n def process_file_with_metadata(self, path, year, region):\n with open(path) as f:\n for line in f:\n # break into sentences\n report_tokenized = tokenize_to_sentence(line)\n\n for sentence in report_tokenized:\n for word in SMELL_WORDS:\n if word in sentence.lower():\n categories = self.categorise_sentence(sentence)\n if categories:\n for category in categories:\n o = Smell(region, category, sentence, year)\n self.results.append(o)\n else:\n o = Smell(region, 'Uncategorised', sentence, year)\n self.uncategorised.append(o)\n\n def categorise_sentence(self, sentence):\n \n results = set()\n for category in self.smellTypes:\n for synonym in category.synonyms:\n if synonym in sentence.lower():\n results.add(category.name)\n return results\n \n\n\n\ndef main():\n runner = SmellDataMine()\n runner.run()\n # print(runner.results)\n # categories = [category.name for category in runner.smellTypes]\n\n # run this to get categorised smell results\n # get_categorised_results(runner.results, categories)\n\n # run this to get uncategorised smell results\n # get_uncategorised_results(runner.uncategorised)\n\n # run this to get all smell results\n # get_all_results(runner.results, runner.uncategorised)\n\n results = runner.results + runner.uncategorised\n print(results)\n print(len(results))\n # runner.save_to_database(results)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"NLTK_textmine/smell_datamine_categorized.py","file_name":"smell_datamine_categorized.py","file_ext":"py","file_size_in_byte":6897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"615138907","text":"from selenium import webdriver\nfrom time import sleep\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom random import randint\n# from selenium import WebDriverWait\nfrom requests.utils import quote\nfrom bs4 import BeautifulSoup\nimport json\nfrom fuzzywuzzy import fuzz\nfrom selenium.webdriver.chrome.options import Options\n\n\n# # Creation of a new instance of Google Chrome\n#driver = webdriver.Chrome(executable_path=r'C:\\Users\\EGE\\Desktop\\ENS491\\career path\\chromedriver_win32\\chromedriver.exe') \n\nopts = Options()\nopts.add_argument(\"user-agent='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36 Edg/87.0.664.60'\")\n\ndriver = webdriver.Chrome(options=opts, executable_path=r'C:\\Users\\EGE\\Desktop\\ENS491\\career path\\chromedriver_win32\\chromedriver.exe')\n\n# Load the page on the browser\ndriver.get('https://www.linkedin.com')\n\nusername = driver.find_element_by_id('session_key')\nusername.send_keys('egealperdemirkaya@gmail.com')\nsleep(4.3)\n\n# locate password form by_class_name\npassword = driver.find_element_by_id('session_password')\n# send_keys() to simulate key strokes\npassword.send_keys('ege97alper99')\nsleep(6.2)\n\nsleep(0.5)\n# locate submit button by_class_name\nlog_in_button = driver.find_element_by_class_name('sign-in-form__submit-button')\n\nlog_in_button.click()\nwait = randint(7,10)\nsleep(wait)\n\ndata = {}\n\n\nf = open('top50_uni_facet.json','r')\nuni_names = json.load(f)\n\nuni_list=[]\n\nfor uni_name in uni_names:\n #search = 
driver.find_element_by_class_name('search-global-typeahead__input always-show-placeholder')\n\n \n univ = quote(uni_name)\n #driver.get('https://www.linkedin.com/search/results/schools/?keywords=%C4%B0zmir%20K%C3%A2tip%20%C3%87elebi%20%C3%9Cniversitesi&origin=SWITCH_SEARCH_VERTICAL')\n driver.get('https://www.linkedin.com/search/results/schools/?keywords='+univ+'&origin=SWITCH_SEARCH_VERTICAL')\n #search_query = driver.find_element_by_name('q')\n wait = randint(3,5)\n sleep(wait)\n\n # selected_uni = driver.find_elements_by_class_name(\"entity-result__title-line.flex-shrink-1.entity-result__title-text--black\")\n selected_uni = driver.find_elements_by_class_name(\"entity-result__title-text.t-16\")\n \n try:\n if uni_name.strip() == 'Bahcesehir Universitesi':\n raise Exception(\"\")\n\n selected_uni[0].click()\n\n sleep(3)\n \n driver.get(str(driver.current_url)+\"people/\")\n\n #print(selected_uni)\n wait =randint(3,5)\n sleep(wait)\n\n select_uni_name = driver.find_element_by_class_name(\"org-top-card-summary__title.t-24.t-black.t-bold.truncate\").text\n print(\"uni name in file:\", uni_name)\n print(\"uni name in linkedin:\", select_uni_name)\n print(\"---------------------------------\")\n \n alumni_number = int(((driver.find_element_by_class_name(\"t-20.t-black.t-bold\").text).split(\" \")[0]).replace(',', ''))\n \n uni_list.append({\"uni_name\": uni_name, \"alumni_count\": alumni_number})\n\n wait = randint(3,5)\n sleep(wait)\n except Exception as e:\n print(e)\n wait = randint(1,3)\n sleep(wait)\n print(uni_name, \" could not found\")\n #break\n\nwith open(\"universities_alumni_number.json\", \"w\", encoding=\"utf8\") as write_file:\n json.dump(uni_list, write_file,ensure_ascii=False)\n\n\n","sub_path":"Find uni alumni number/uni_find_alumni_number.py","file_name":"uni_find_alumni_number.py","file_ext":"py","file_size_in_byte":3367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"192233879","text":"import numpy as np\nimport torch\nfrom torch.autograd import Variable\n\nfrom const import PAD\n\n\ndef _lcs(x, y):\n n = len(x)\n m = len(y)\n table = dict()\n\n for i in range(n + 1):\n for j in range(m + 1):\n if i == 0 or j == 0:\n table[i, j] = 0\n elif x[i - 1] == y[j - 1]:\n table[i, j] = table[i - 1, j - 1] + 1\n else:\n table[i, j] = max(table[i - 1, j], table[i, j - 1])\n\n def recon(i, j):\n if i == 0 or j == 0:\n return []\n elif x[i - 1] == y[j - 1]:\n return recon(i - 1, j - 1) + [x[i - 1]]\n elif table[i - 1, j] > table[i, j - 1]:\n return recon(i - 1, j)\n else:\n return recon(i, j - 1)\n\n return len(recon(n, m)), n, m\n\n\ndef rouge_l(evals, refs):\n assert evals.size() == refs.size()\n use_cuda = evals.is_cuda\n\n evals, refs = map(lambda x: x.data.cpu().numpy(), [evals, refs])\n\n scores = []\n for eva, ref in zip(evals, refs):\n same_len, eva_len, ref_len = map(float,\n _lcs(eva, ref[np.where(ref > PAD)]))\n\n r_lcs, p_lcs = same_len / ref_len, same_len / eva_len\n\n beta = p_lcs / (r_lcs + 1e-12)\n f_lcs = ((1 + (beta**2)) * r_lcs * p_lcs) / \\\n (r_lcs + ((beta**2) * p_lcs) + 1e-12)\n scores.append(f_lcs)\n\n scores = np.asarray(scores, dtype=np.float32)\n scores = Variable(torch.from_numpy(scores), requires_grad=False)\n\n if use_cuda:\n scores = scores.cuda()\n\n return scores\n\n\ndef mask_score(props, words, scores):\n assert words.size() == scores.size()\n mask = (words > 0).float()\n\n return props * scores * mask\n\n\nif __name__ == '__main__':\n import torch\n from torch.autograd import Variable\n import 
torch.nn.functional as F\n\n data = Variable(torch.LongTensor([[3, 1, 2, 3, 1, 0], [2, 3, 4, 4, 0, 0]]))\n label = Variable(torch.LongTensor(\n [[3, 1, 2, 3, 1, 0], [2, 3, 2, 3, 1, 0]]))\n bl = Variable(torch.LongTensor([[3, 1, 2, 3, 2, 0], [1, 3, 4, 4, 0, 0]]))\n data = data.cuda()\n label = label.cuda()\n bl = bl.cuda()\n\n reward = rouge_l(bl, label) - rouge_l(data, label)\n print(reward)\n\n props = torch.randn(16, 17, 256)\n words = torch.LongTensor([[i for i in range(16, -1, -1)]\n for _ in range(16)])\n scores = torch.randn(16, 17)\n\n print(mask_score(props, words, scores))\n","sub_path":"Image-Cap/rouge.py","file_name":"rouge.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"70238842","text":"import re\r\n\r\n\r\nclass Stack(object):\r\n '''定义栈类'''\r\n\r\n def __init__(self):\r\n self.list = []\r\n\r\n '''判断栈空'''\r\n\r\n def is_empty(self):\r\n return self.list == []\r\n\r\n '''在栈顶添加元素'''\r\n\r\n def push(self, data):\r\n self.list.append(data)\r\n\r\n '''弹出栈顶元素'''\r\n\r\n def pop(self):\r\n return self.list.pop()\r\n\r\n '''取栈顶元素,不修改栈内容'''\r\n\r\n def peek(self):\r\n return self.list[-1]\r\n\r\n '''栈大小'''\r\n\r\n def size(self):\r\n return len(self.list)\r\n\r\n\r\nf = open(r\"D:\\1.txt\", \"r\", encoding=\"utf-8\")\r\ncount = len(f.readlines()) + 1\r\nf.seek(0)\r\nnum5 = 0\r\nnum6 = 0\r\nflag2 = 0\r\nflag1 = 0\r\nstr1 = \"if\"\r\nstr2 = \"else\"\r\nstr3 = \"{\"\r\nstr4 = \"}\"\r\nstr5 = \"else if\"\r\nstack = Stack()\r\nxun = 1\r\nfor i in range(count):\r\n data1 = f.readline().strip()\r\n m1 = re.findall(r\"\\bif\\b\", data1)\r\n m2 = re.findall(r\"\\belse\\b\", data1)\r\n if (len(m1) != 0) and (len(m2) != 0):\r\n stack.push(str5)\r\n if (len(m1) != 0) and (len(m2) == 0):\r\n stack.push(str1)\r\n if (len(m2) != 0) and (len(m1) == 0):\r\n if stack.peek() == str1:\r\n stack.pop()\r\n num5 += 1\r\n if stack.peek() == str5:\r\n while xun == 1:\r\n if (stack.peek() == str1):\r\n stack.pop()\r\n num6 += 1\r\n break\r\n else:\r\n stack.pop()\r\n result = str3 in data1\r\n if (result == True) and (flag1 == 1):\r\n stack.push(str3)\r\n if (result == True) and (flag1 == 0):\r\n flag1 = 1\r\n stack.push(str3)\r\n\r\n result = str4 in data1\r\n if result == True:\r\n if stack.peek() == str5:\r\n while xun == 1:\r\n if (stack.peek() == str1):\r\n stack.pop()\r\n break\r\n else:\r\n stack.pop()\r\n if stack.peek() == str3:\r\n stack.pop()\r\n\r\nf.close()\r\nprint(num5)\r\nprint(num6)\r\n","sub_path":"第4部分.py","file_name":"第4部分.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"567252033","text":"# code based on https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/multilayer_perceptron.py\nfrom __future__ import print_function\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL']='2'\n\nimport tensorflow as tf\nimport numpy as np\nimport cv2\nimport random\nfrom load_data import LoadData as GetCIFARData\n\nFILE_NAME = 'CIFAR_HIDDEN2_NN'\ncifar = GetCIFARData(one_hot=True)\n\n# Parameters\nn_input = 32*32*3 # my CIFAR data input (img shape = 32*32)\nn_classes = 5 # my CIFAR total classes (0-5)\nstarter_learning_rate = .0001\nepochs = 2000\nbatch_size = 100\ndisplay_step = 200\n\ndef SaveAccuracy(name, errors, a):\n\timport simplejson\n\tf = open( os.path.join(\"accuracies\", name), \"w\" )\n\tf.write(\"%2.4f\" % a)\n\tf.write(\"\\n\\n\")\n\tsimplejson.dump(errors, 
f)\n\tf.close()\n\ndef SaveErrorPlot(name, title, errors):\n\timport matplotlib.pyplot as plt\n\tfig = plt.figure()\n\tplt.plot(errors)\n\n\tplt.suptitle(title, fontsize=20)\n\tplt.xlabel('epoch')\n\tplt.ylabel('average error')\n\t\n\tplt.savefig( os.path.join(\"plots\", name) + \".png\")\n\ndef InitWeightMaker(init):\n\tcache = {'init': init}\n\tdef MakeWeight(out):\n\t\tinp = cache['init']\n\t\tcache['init'] = out\n\t\treturn tf.Variable( tf.random_normal([inp, out]) )\n\treturn MakeWeight\n\nfor factor1 in [2, 4, 8, 16]:\n\tfor factor2 in [2, 4, 8, 16]:\n\t\tif factor1 >= factor2:\n\t\t\tcontinue\n\n\t\t# Network Paramters\n\t\thidden1 = n_input/factor1 # number of features in 1st hidden layer\n\t\thidden2 = n_input/factor2 # number of features in 2nd hidden layer\n\t\tprint (\"Now processing with h1=\", hidden1, \"h2=\", hidden2)\n\n\t\t###############\n\t\t# BUILD MODEL #\n\t\t###############\n\t\twith tf.device('/gpu:0'):\n\n\t\t\t# tf Graph input\n\t\t\tx = tf.placeholder(\"float\", shape=[None, n_input])\n\t\t\tlabels = tf.placeholder(\"float\", shape=[None, n_classes])\n\n\t\t\tMakeWeight = InitWeightMaker(n_input)\n\t\t\tMakeBias = lambda x : tf.Variable( tf.random_normal([x]) )\n\t\t\tMakeWeightBias = lambda x: ( MakeWeight(x), MakeBias(x) )\n\t\t\tWeightedOutput = lambda i, w, b: tf.add( tf.matmul(i, w), b )\n\t\t\tActivate = lambda x: tf.nn.relu(x)\n\n\t\t\tW1, b1 = MakeWeightBias(hidden1)\n\t\t\tlayer1 = Activate( WeightedOutput(x, W1, b1) )\n\n\t\t\tW2, b2 = MakeWeightBias(hidden2)\n\t\t\tlayer2 = Activate( WeightedOutput(layer1, W2, b2) )\n\n\t\t\tW3, b3 = MakeWeightBias(n_classes)\n\t\t\tlogits = WeightedOutput(layer2, W3, b3)\n\t\t\tcost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))\n\n\t\t\t# Define loss and optimizer\n\t\t\tglobal_step = tf.placeholder(tf.int32, [])\n\t\t\tlearning_rate = tf.train.exponential_decay(starter_learning_rate, global_step, epochs/2, .5)\n\t\t\tbackprop = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)\n\n\t\t\t##############################\n\t\t\t####### EVALUATE MODEL #######\n\t\t\t##############################\n\n\t\t\t# Launch the graph\n\t\t\terrors = []\n\t\t\tmyaccuracy = None\n\t\t\twith tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:\n\t\t\t\t# Initializing the variables\n\t\t\t\tinit = tf.global_variables_initializer()\n\t\t\t\tsess.run(init)\n\t\t\t\t\n\t\t\t\t# Training cycle\n\t\t\t\tfor epoch in range( epochs ):\n\t\t\t\t\tavg_cost = 0\n\t\t\t\t\ttotal_batch = int( cifar.train.num_examples/batch_size )\n\n\t\t\t\t\t# Loop over all batches\n\t\t\t\t\tfor i in range(total_batch):\n\t\t\t\t\t\tbatch_x, batch_y = cifar.train.next_batch(batch_size)\n\t\t\t\t\t\t_, c = sess.run([backprop, cost], \n\t\t\t\t\t\t\t\t\t\tfeed_dict={x:batch_x, labels:batch_y, global_step:epoch})\n\t\t\t\t\t\tavg_cost += c/total_batch\n\t\t\t\t\t\n\t\t\t\t\t# Display logs per epoch step\n\t\t\t\t\terrors.append(avg_cost)\n\t\t\t\t\tif epoch % display_step == 0:\n\t\t\t\t\t\tprint(\"Epoch: \", '%4d' % (epoch+1), \"cost=\", \"{:.9f}\".format(avg_cost))\n\t\t\t\tprint(\"Optimization Finished!\")\n\n\t\t\t\tcorrect_prediction = tf.equal( tf.argmax(logits, 1), tf.argmax(labels, 1) )\n\t\t\t\taccuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n\t\t\t\tmyaccuracy = accuracy.eval({x: cifar.test.images, labels: cifar.test.labels})\n\t\t\t\tprint(\"Accuracy: \", myaccuracy)\n\t\t\t\n\t\t\t########\n\t\t\t# SAVE #\n\t\t\t########\n\t\t\tname = FILE_NAME + \"-h1=\" + 
repr(hidden1) + \"-h2=\" + repr(hidden2)\n\t\t\ttitle = \"Two hidden layers h1 and h2 where h1=\" + repr(hidden1) + \" h2=\" + repr(hidden2) \n\t\t\tSaveAccuracy(name, errors, myaccuracy)\n\t\t\tSaveErrorPlot(name, title, errors)\n\t\t\tprint (\"DONE with \" + name)\n\nprint (\"DONE with processing \" + FILE_NAME)","sub_path":"cifarnnh2.py","file_name":"cifarnnh2.py","file_ext":"py","file_size_in_byte":4229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"170523829","text":"import hotspot\nfrom iops_data_2019 import phase1\nfrom hotspot.forecast import get_sum_cuboid\nimport csv\nimport ray\nfrom tqdm import tqdm\nimport copy\nimport time\nimport numpy as np\nimport pandas as pd\n\nDATA_DIR = \"../iops-data-2019/iops_data_2019/data\"\nDATA_CSV_DIR = DATA_DIR + \"/2019AIOps_data_test1\"\nDATA_FORE_DIR = DATA_DIR + \"/2019AIOps_data_test1_forecast\"\nTIMESTAMP = DATA_DIR + \"/Anomalytime_data_test1.csv\"\n\nM = 10\nPT = 0.95\nT_EEP = 0.2\nT_EP = 0.8\nT_S0 = 0\n\nray.init()\n\n\ndef dict2output(root_cause):\n output = \"\"\n for item in root_cause:\n if len(output) != 0:\n output += \"&\"\n output += root_cause[item]\n return output\n\n\ndef dictlist2output(root_cause_list):\n output = \"\"\n for root_cause in root_cause_list:\n if len(output) != 0:\n output += \";\"\n output += dict2output(root_cause)\n return output\n\n\nread_time = csv.reader(open(TIMESTAMP, encoding='utf-8'))\n'''\nresult_write_MCTS = open(\"test1_result_AMCTS.csv\", \"w\")\nwriter_MCTS = csv.writer(result_write_MCTS)\nwriter_MCTS.writerow([\"timestamp\", \"set\"])\n\nresult_write_hotspot = open(\"test1_result_hotspot.csv\", \"w\")\nwriter_hotspot = csv.writer(result_write_hotspot)\nwriter_hotspot.writerow([\"timestamp\", \"set\"])\n'''\nresult_write_hotspot_FullSearch = open(\"test1_result_hotspot_FullSearch.csv\", \"w\")\nwriter_hotspot_FullSearch = csv.writer(result_write_hotspot_FullSearch)\nwriter_hotspot_FullSearch.writerow([\"timestamp\", \"set\"])\nwriter_cause_top = csv.writer(open(\"top_cause_FullSearch.csv\", \"w\"))\n\nfor anomalytime in tqdm(read_time):\n if anomalytime[0] == \"timestamp\":\n continue\n interval = 300*1000\n fore_list = []\n real_file = DATA_CSV_DIR + \"/\" + anomalytime[0] + \".csv\"\n real_frame = pd.read_csv(real_file, names=[\"i\", \"e\", \"c\", \"p\", \"l\", \"indicator\"])\n real_frame = real_frame[real_frame[\"indicator\"] > 0]\n print(\"length of real frame is {}\".format(len(real_frame)))\n fore_file = DATA_FORE_DIR + \"/f\" + anomalytime[0] + \".csv\"\n fore_frame = pd.read_csv(fore_file, names=[\"i\", \"e\", \"c\", \"p\", \"l\", \"indicator\"])\n\n '''\n best_set, max_Q = hotspot.AMCTS.adtributor_MCTS(fore_frame, real_frame, M, PT, T_EEP, T_EP)\n result_dict = [phase1.l2d(cause) for cause in best_set]\n output1 = dictlist2output(result_dict)\n writer_MCTS.writerow([anomalytime[0], output1])\n\n best_set2, max_Q2 = hotspot.AMCTS.adtributor_hotspot(fore_frame, real_frame, M, PT, T_EEP, T_EP)\n result_dict2 = [phase1.l2d(cause) for cause in best_set2]\n output2 = dictlist2output(result_dict2)\n writer_hotspot.writerow([anomalytime[0], output2])\n '''\n best_set3, max_Q3, cause_topk = hotspot.AMCTS.adtributor_hotspot_FullSearch(fore_frame, real_frame, M, PT, T_EEP, T_EP, T_S0)\n result_dict3 = [phase1.l2d(cause) for cause in best_set3]\n output3 = dictlist2output(result_dict3)\n writer_hotspot_FullSearch.writerow([anomalytime[0], output3])\n\n print(\"length of cause_topk is {}\".format(len(cause_topk)))\n if len(cause_topk) >= 
3:\n        print(\"cause_top3 is {}\".format(cause_topk[:3]))\n    writer_cause_top.writerow([anomalytime[0], cause_topk])\n\n\ndef forecast_frame_test(df_list):\n    print('forecasting')\n    location = 2\n    window = len(df_list)\n    start_time = time.time()\n    sum_indicator = df_list[0]['indicator']\n    # accumulate the 'indicator' column from every frame in the window\n    for i in range(1, window):\n        sum_indicator = [a+b for a, b in zip(sum_indicator, df_list[i]['indicator'])]\n    indicator = [a/window for a in sum_indicator]\n    forecast_ma = df_list[location][df_list[0].columns[:-1]]\n    forecast_ma['indicator'] = indicator\n    '''\n    for i in range(len(forecast_ma)):\n        hist_data = [0] * window\n        itemset = forecast_ma.iloc[i][forecast_ma.columns[:-1]].tolist()\n        for wd in range(window):\n            hist_data[wd] = get_sum_cuboid(df_list[location-int(window/2)+wd], itemset)\n        forecast_ma.iloc[i, -1] = np.mean(hist_data)\n    '''\n    print('forecast is done')\n    print(\"--- {} seconds ---\".format((time.time() - start_time)))\n    return forecast_ma\n\n\ndef forecast_frame_GetSumCuboid(df_list, real):\n    start_time = time.time()\n    location = 2\n    window = len(df_list)\n\n    forecast_ma = copy.deepcopy(real)\n    for i in range(len(forecast_ma)):\n        hist_data = [0] * window\n        itemset = forecast_ma.iloc[i][forecast_ma.columns[:-1]].tolist()\n        for wd in range(window):\n            hist_data[wd] = get_sum_cuboid(df_list[location-int(window/2)+wd], itemset)\n        forecast_ma.iloc[i, -1] = np.mean(hist_data)\n    print(\"forecast time is: {}\".format(time.time() - start_time))\n    return forecast_ma\n","sub_path":"Code/hotspot/AnomalyTest1.py","file_name":"AnomalyTest1.py","file_ext":"py","file_size_in_byte":4585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} {"seq_id":"560918512","text":"\"\"\"Added weight field to housedata model\n\nRevision ID: fa55d611d59d\nRevises: 3f45c9122778\nCreate Date: 2020-12-02 09:40:45.507212\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'fa55d611d59d'\ndown_revision = '3f45c9122778'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.add_column('animal_health_house_data', sa.Column('weight', sa.Float(precision=2), nullable=True))\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n    op.drop_column('animal_health_house_data', 'weight')\n    # ### end Alembic commands ###\n","sub_path":"migrations/versions/fa55d611d59d_added_weight_field_to_housedata_model.py","file_name":"fa55d611d59d_added_weight_field_to_housedata_model.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} {"seq_id":"94626008","text":"# PCA on face images\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom scipy.io import loadmat\nfrom sklearn.decomposition import PCA\ndef plot_n_iamge(X,n):\n    pic_size=int(np.sqrt(X.shape[1]))\n    grid_size=int(np.sqrt(n))\n    first_n_image=X[:n,:]\n    fig,ax_array=plt.subplots(nrows=grid_size,ncols=grid_size,sharex=True,sharey=True,figsize=(8,8))\n    print(first_n_image.shape)\n    a=0\n    for r in range(grid_size):\n        for c in range(grid_size):\n            ax_array[r,c].imshow(first_n_image[grid_size*r+c,:].reshape((pic_size,pic_size)))\n            plt.xticks(np.array([]))\n            plt.yticks(np.array([]))\ndef covariance_matrix(X):\n    m=X.shape[0]\n    return (X.T@X)/m\ndef normalize(X):\n    X_copy=X[:]\n    m,n=X_copy.shape\n    for col in range(n):\n        X_copy[:,col]=(X_copy[:,col]-X_copy[:,col].mean())/X_copy[:,col].std()\n    return X_copy\n\ndef pac(X):\n    # standardize: for each column, (X - mean) / std\n    X_std=normalize(X)# m n\n    m=X_std.shape[0]\n    # compute the covariance matrix\n    Sigma=covariance_matrix(X_std)# n n\n    U,S,A=np.linalg.svd(Sigma)# U: n*n\n    return U,S,A\ndef project_data(X,U,k):\n    m,n=X.shape\n    if k>n:\n        raise ValueError('k should be lower than dimension n')\n    return X@U[:,:k]# project onto the first k components\ndef plot_n_image(X,n):\n    \"\"\"\n    plot first n images\n    n has to be a square number\n    :param X:\n    :param n:\n    :return:\n    \"\"\"\ndef recover_data(Z,U):\n    m,n=Z.shape\n    if n>=U.shape[0]:\n        raise ValueError('Z dimension is >= U, you should recover from lower dimension to higher')\n    return Z@U[:,:n].T\nif __name__ == '__main__':\n    faces=loadmat('data/ex7faces.mat')\n    # load the face data and transpose each image so the faces are upright\n    X=np.array([x.reshape((32,32)).T.reshape(1024) for x in faces.get('X')])# 5000 x 1024, each row of 1024 pixels is one face\n    # plot_n_iamge(X,n=64)\n    # plt.show()\n    U,S,_=pac(X)\n    Z=project_data(X,U,k=100)\n    # plot_n_iamge(Z,n=64)\n    # plt.show()\n    X_recover=recover_data(Z,U)\n    # plot_n_iamge(X_recover,n=64)\n    # plt.show()\n    # sklearn PCA\n    k=100\n    sk_pac=PCA(n_components=k)\n    Z=sk_pac.fit_transform(X)\n    # plot_n_iamge(Z,64)\n    # plt.show()\n    X_recover=sk_pac.inverse_transform(Z)\n    plot_n_iamge(X_recover,64)\n    plt.show()\n    # ratio of the top-k singular values to the total (retained variance)\n    sum1=0\n    sum2 = 0\n    for i in range(k):\n        sum1+=S[i]\n    for i in range(S.shape[0]):\n        sum2+=S[i]\n    print(sum1/sum2)","sub_path":"ex7/ex7_4.py","file_name":"ex7_4.py","file_ext":"py","file_size_in_byte":2443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} {"seq_id":"13612200","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/5/26 8:22 PM\n# @Author : pengyuan.li\n# @Site : \n# @File : 20200526_find_duplicate.py\n# @Software: PyCharm\n\nfrom typing import List\n\n\nclass Solution:\n    # Given an array nums of n + 1 integers, each between 1 and n (inclusive), at least one integer is repeated. Assuming there is exactly one repeated integer, find it.\n    def findDuplicate(self, nums: List[int]) -> int:\n        nums_dict = {}\n        for num in nums:\n            nums_dict[num] = nums_dict.setdefault(num, 0) + 1\n            if nums_dict[num] > 1:\n                return num\n\n\nif __name__ == \"__main__\":\n    ss = Solution()\n    nums = [1, 3, 4, 2, 2]\n    
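# expected output: 2, the duplicated value in nums\n    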
print(ss.findDuplicate(nums))\n","sub_path":"leetcode/daily_question/20200526_find_duplicate.py","file_name":"20200526_find_duplicate.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"347889171","text":"from pypika import Query as q, Field as f\nfrom functools import lru_cache\n\n\ndef getTablesWithColsQry(colNames, schema=None):\n \"\"\"\n \n :param list/str colNames:\n :param database.oracle.OraConnection conn: \n \n Example usage:\n colNames = ['col1'] \n schema = 'myuser'\n \"\"\"\n if isinstance(colNames, str): #takes str or list\n colNames = [colNames] \n \n query = q().from_(\n 'ALL_TAB_COLUMNS'\n ).distinct().select(\n 'TABLE_NAME'\n ).where(\n f('COLUMN_NAME').isin(colNames)\n )\n \n if schema:\n query = query.where(f('OWNER') == schema)\n \n res = query.get_sql() #with_quotes=False)\n return res\n\n@lru_cache()\ndef getTablesWithCols(conn, colNames, **kwargs):\n \"\"\"\n \n :param dict kwargs: see getTablesWithColsQry\n :returns pandas.Series:\n \"\"\"\n sql = getTablesWithColsQry(colNames, **kwargs)\n df = conn.getdf(sql)\n return df.ix[:,0]\n\n\ndef createViewsFromAnotherSchema(tables, srcSchema):\n localVars = locals()\n sqls = [\"create or replace view {0} as select * from {srcSchema}.{0}\".format(table, **localVars) for table in tables]\n return sqls\n\n\ndef grantTables(tables, user='PUBLIC', priv='select'):\n localVars = locals()\n sqls = [\"grant {priv} on {0} to {user}\".format(table, **localVars) for table in tables]\n return sqls \n\n################################### TEST #######################\n\ndef testGetTablesWithCols():\n from database.oracle.helpers import getTablesWithCols\n from database.oracle.core import connect\n conn = connect()\n getTablesWithCols(conn, 'CALC_SYSTEM_DATE')\n","sub_path":"database/oracle/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"101210647","text":"import torch\nfrom torch.nn.utils import clip_grad\n\nfrom ..core_hook import HOOKS, Hook\n\n\n@HOOKS.register_module()\nclass OptimizerHook(Hook):\n def __init__(self, grad_clip=None, fp16=False):\n self.grad_clip = grad_clip\n self.fp16 = fp16\n self.scaler = torch.cuda.amp.GradScaler()\n\n def clip_grads(self, params):\n params = list(filter(lambda p: p.requires_grad and p.grad is not None, params))\n if len(params) > 0:\n return clip_grad.clip_grad_norm_(params, **self.grad_clip)\n\n def after_train_iter(self, runner):\n runner.optimizer.zero_grad()\n if self.fp16:\n self.scaler.scale(runner.outputs[\"loss\"]).backward()\n else:\n runner.outputs[\"loss\"].backward()\n if self.grad_clip is not None:\n grad_norm = self.clip_grads(runner.model.parameters())\n if grad_norm is not None:\n # Add grad norm to the logger\n runner.log_buffer.update(\n {\"grad_norm\": float(grad_norm)}, runner.outputs[\"num_samples\"]\n )\n\n if self.fp16:\n self.scaler.step(runner.optimizer)\n self.scaler.update()\n else:\n runner.optimizer.step()\n","sub_path":"mvt/cores/hook/optimizer.py","file_name":"optimizer.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"308721812","text":"\n# coding: utf-8\n\n# In[1]:\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy as sp\n\nimport datetime\n\n\n# In[2]:\n\ndf_train = 
pd.read_csv('data/trainData.csv')\ndf_sample = pd.read_csv('data/sample.csv')\n\ndf_train.index = df_train['日期']\n\ndf_train.info()\ndf_sample.info()\n# df_sample['地区'].value_counts()\n\n\n# In[3]:\n\nArea = set(df_sample['地区'].values)\nArea = list(Area)\n\n\n# In[4]:\n\n# 得到前k天的价格列表\ndef proDataK(df_train_GD,s,k):\n tmp = []\n curtime = datetime.datetime.strptime(s,'%Y-%m-%d')\n lasttime = curtime - datetime.timedelta(days = k)\n # 日期到字符串\n lasttime = datetime.datetime.strftime(lasttime,'%Y-%m-%d')\n curtime = datetime.datetime.strftime(curtime,'%Y-%m-%d')\n tmp.extend(list(df_train_GD[lasttime:curtime]['价格'].values))\n \n if len(tmp)<(k+1):\n tmp.extend(tmp[len(tmp)-(k+1):])\n \n return tmp\n\n\ndef yesterdayYear(df_train_GD,s,k):\n tmp = []\n curtime = datetime.datetime.strptime(s,'%Y-%m-%d')\n lasttime = curtime - datetime.timedelta(days = k)\n nexttime = curtime + datetime.timedelta(days = k)\n\n curtime = datetime.datetime.strftime(curtime,'%Y-%m-%d')\n lasttime = datetime.datetime.strftime(lasttime,'%Y-%m-%d')\n nexttime = datetime.datetime.strftime(nexttime,'%Y-%m-%d')\n\n tmp.extend(list(df_train_GD[lasttime:curtime]['价格'].values))\n tmp.extend(list(df_train_GD[curtime:nexttime]['价格'].values))\n\n if len(tmp) < 2 * (k+1):\n tmp.extend(tmp[len(tmp)-2 * (k+1):])\n \n return tmp\n \n\n\n# In[5]:\n\ndef getLineModel(AreaName):\n df_train_GD = df_train[df_train['地区'] == AreaName]\n year = 2016\n x = []\n y = []\n\n for i in range(1,4):\n# print(i)\n for d in range(1,32):\n if i == 2 and d > 28:\n continue\n \n tmp = []\n if d < 10: \n s = str(year)+'-0'+str(i)+'-0'+str(d)\n else: \n s = str(year)+'-0'+str(i)+'-'+str(d)\n \n # print(s)\n if len(df_train_GD[df_train_GD.index == s]['价格'].values) == 0:\n if len(y) == 0:\n y.append(df_train_GD[df_train_GD.index == '2016-01-01']['价格'].values[0])\n else:\n y.append(y[len(y)-1])\n\n else:\n y.append(df_train_GD[df_train_GD.index == s]['价格'].values[0])\n \n \n # 添加前d天的数据特征\n for k in [1,3,5,7]:\n tmp.extend(proDataK(df_train_GD,s,k))\n \n if len(tmp) < 20:\n tmp.extend(tmp[len(tmp)-20:])\n \n \n# print(len(tmp))\n \n \n # 得到前两年对应时间的前5天,后五天\n if d < 10: \n s = str(year-2)+'-0'+str(i)+'-0'+str(d)\n else: \n s = str(year-2)+'-0'+str(i)+'-'+str(d)\n \n for k in [1,3,5,7]:\n tmp.extend(yesterdayYear(df_train_GD,s,k))\n \n \n \n # 得到前一年对应时间的前5天,后五天\n if d < 10: \n s = str(year-1)+'-0'+str(i)+'-0'+str(d)\n else: \n s = str(year-1)+'-0'+str(i)+'-'+str(d)\n\n for k in [1,3,5,7]:\n tmp.extend(yesterdayYear(df_train_GD,s,k))\n \n if len(tmp) < 100:\n tmp.extend(tmp[len(tmp)-100:])\n \n \n# print(len(tmp))\n x.append(tmp)\n\n # print(s)\n\n # 得到参数\n x = np.array(x)\n y = np.array(y)\n\n print(x.shape)\n print(len(y))\n\n from sklearn import linear_model\n\n regr = linear_model.LinearRegression()\n \n regr.fit(x, y)\n\n regr.coef_\n \n return regr\n\n\n\n\n# In[6]:\n\ndef Predicted(AreaName,regr):\n df_train_GD = df_train[df_train['地区'] == AreaName]\n year = 2017\n x = []\n y = []\n \n y_ = 5\n for i in range(1,4):\n for d in range(1,32):\n if i == 2 and d > 28: \n continue\n tmp = []\n if d < 10: \n ts = str(year)+'-0'+str(i)+'-0'+str(d)\n else: \n ts = str(year)+'-0'+str(i)+'-'+str(d)\n \n # 添加前d天的数据特征\n for k in [1,3,5,7]:\n tmp.extend(proDataK(df_train_GD,ts,k))\n \n if len(tmp) < 20:\n tmp.extend(tmp[len(tmp)-20:])\n \n \n # print(len(tmp))\n # 得到前两年对应时间的前5天,后五天\n if d < 10: \n s = str(year-2)+'-0'+str(i)+'-0'+str(d)\n else: \n s = str(year-2)+'-0'+str(i)+'-'+str(d)\n \n \n for k in [1,3,5,7]:\n tmp.extend(yesterdayYear(df_train_GD,s,k))\n\n # 
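debug aid, kept commented out: inspect the window that feeds the features\n            # 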
print(df_train_GD[curtime:nexttime])\n\n\n\n # 得到前一年对应时间的前5天,后五天\n if d < 10: \n s = str(year-1)+'-0'+str(i)+'-0'+str(d)\n else: \n s = str(year-1)+'-0'+str(i)+'-'+str(d)\n \n \n for k in [1,3,5,7]:\n tmp.extend(yesterdayYear(df_train_GD,s,k))\n \n \n if len(tmp) < 100:\n tmp.extend(tmp[len(tmp)-100:])\n \n # print(df_train_GD[lasttime:curtime])\n # print(df_train_GD[curtime:nexttime])\n\n # print(tmp)\n tmp = np.array(tmp)\n \n if y_ > 1 and y_ < 25: \n yp_ = y_\n\n y_ = regr.predict(tmp)\n \n if y_ < 1 or y_ > 25: # 设置为前一个的值\n y_ = yp_\n \n t = pd.DataFrame({'日期':ts, '地区':AreaName,'价格':y_,'数量':None,'均重':None})\n t.index = t['日期']\n # print(t)\n\n df_train_GD = df_train_GD.append(t)\n \n# print(len(df_train_GD['2017-01-01':'2017-04-01']))\n \n return df_train_GD['2017-01-01':'2017-03-31']\n\n\n# In[7]:\n\ndf_ans = pd.DataFrame()\n\nfor i in range(len(Area)):\n regrModel = getLineModel(Area[i])\n df_ans = df_ans.append(Predicted(Area[i],regrModel))\n\n\n# In[155]:\n\n# 写入文件\ndf_ans.info()\ndf_ans = df_ans.sort(['日期','地区'])[['地区','价格']]\n\ndf_ans.to_csv('data/result1_1.csv')\n\n\n# In[156]:\n\ndf_ans\n\n\n# In[93]:\n\n\n\n\n# In[ ]:\n\n\n\n\n# In[ ]:\n\n\n\n\n# In[149]:\n\n\n\n\n# In[ ]:\n\n\n\n","sub_path":"code/myCode1.py","file_name":"myCode1.py","file_ext":"py","file_size_in_byte":6549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"76176959","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n'''\nuse this class for encoding sparse data\n'''\n\nfrom six import integer_types\nfrom collections import defaultdict\n#import numpy\nclass sparse(object):\n 'the encoding class for maxent'\n def __init__(self, labels, mapping, unseen_features=False,\n alwayson_features=False):\n if set(mapping.values()) != set(range(len(mapping))):\n raise ValueError('Mapping values must be exactly the '\n 'set of integers from 0...len(mapping)')\n\n self._labels = list(labels)\n \"\"\"A list of attested labels.\"\"\"\n\n self._mapping = mapping\n \"\"\"dict mapping from (fname,fval,label) -> fid\"\"\"\n\n self._length = len(mapping)\n \"\"\"The length of generated joint feature vectors.\"\"\"\n\n self._alwayson = None\n \"\"\"dict mapping from label -> fid\"\"\"\n\n self._unseen = None\n \"\"\"dict mapping from fname -> fid\"\"\"\n\n if alwayson_features:\n self._alwayson = dict((label, i+self._length)\n for (i, label) in enumerate(labels))\n self._length += len(self._alwayson)\n\n if unseen_features:\n fnames = set(fname for (fname, fval, label) in mapping)\n self._unseen = dict((fname, i+self._length)\n for (i, fname) in enumerate(fnames))\n self._length += len(fnames)\n\n def encode(self, featureset, label):\n # Inherit docs.\n encoding = []\n\n # Convert input-features to joint-features:\n for fname, fval in featureset.items():\n # Known feature name & value:\n if (fname, fval, label) in self._mapping:\n encoding.append((self._mapping[fname, fval, label], 1))\n\n # Otherwise, we might want to fire an \"unseen-value feature\".\n elif self._unseen:\n # Have we seen this fname/fval combination with any label?\n for label2 in self._labels:\n if (fname, fval, label2) in self._mapping:\n break # we've seen this fname/fval combo\n # We haven't -- fire the unseen-value feature\n else:\n if fname in self._unseen:\n encoding.append((self._unseen[fname], 1))\n\n # Add always-on features:\n if self._alwayson and label in self._alwayson:\n encoding.append((self._alwayson[label], 1))\n\n return encoding\n\n\n def describe(self, f_id):\n # Inherit docs.\n if 
not isinstance(f_id, integer_types):\n raise TypeError('describe() expected an int')\n try:\n self._inv_mapping\n except AttributeError:\n self._inv_mapping = [-1]*len(self._mapping)\n for (info, i) in self._mapping.items():\n self._inv_mapping[i] = info\n\n if f_id < len(self._mapping):\n (fname, fval, label) = self._inv_mapping[f_id]\n return '%s==%r and label is %r' % (fname, fval, label)\n elif self._alwayson and f_id in self._alwayson.values():\n for (label, f_id2) in self._alwayson.items():\n if f_id == f_id2:\n return 'label is %r' % label\n elif self._unseen and f_id in self._unseen.values():\n for (fname, f_id2) in self._unseen.items():\n if f_id == f_id2:\n return '%s is unseen' % fname\n else:\n raise ValueError('Bad feature id')\n\n\n def labels(self):\n # Inherit docs.\n return self._labels\n\n\n def length(self):\n # Inherit docs.\n return self._length\n\n\n @classmethod\n def store(cls, train_toks, count_cutoff=0, labels=None, **options):\n mapping = {} # maps (fname, fval, label) -> fid\n seen_labels = set() # The set of labels we've encountered\n count = defaultdict(int) # maps (fname, fval) -> count\n\n for (tok, label) in train_toks:\n if labels and label not in labels:\n raise ValueError('Unexpected label %s' % label)\n seen_labels.add(label)\n\n # Record each of the features.\n for (fname, fval) in tok.items():\n\n # If a count cutoff is given, then only add a joint\n # feature once the corresponding (fname, fval, label)\n # tuple exceeds that cutoff.\n count[fname, fval] += 1\n if count[fname, fval] >= count_cutoff:\n if (fname, fval, label) not in mapping:\n mapping[fname, fval, label] = len(mapping)\n\n if labels is None:\n labels = seen_labels\n return cls(labels, mapping, **options)","sub_path":"sparse_class.py","file_name":"sparse_class.py","file_ext":"py","file_size_in_byte":4761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"415628542","text":"import json\nimport sys\nf = open(sys.argv[1],'r')\nlines = f.readlines()\nrows = []\nfor line in lines:\n\treg = {}\n\tdata = line.split('\\n')[0].split('\\t')\n\tif len(data) != 1:\n\t\tcontinue\n\treg['object'] = data[0]\n\trows.append(reg)\t\nf.close()\ndata = {}\ndata['table'] = rows\nprint('total ' + str(len(rows)) + ' rows imported') \nf = open('objects.json.raw','w')\njson.dump(data, f)\nf.close()\n","sub_path":"list_objects2json.py","file_name":"list_objects2json.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"537009960","text":"import pandas as pd\nimport numpy as np\nimport copy\n\n\nclass TriangleDunders:\n ''' Class that implements the dunder (double underscore) methods for the\n Triangle class\n '''\n def _validate_arithmetic(self, other):\n ''' Common functionality BEFORE arithmetic operations '''\n obj = copy.deepcopy(self)\n other = other if type(other) in [int, float] else copy.deepcopy(other)\n ddims = None\n odims = None\n if type(other) not in [int, float, np.float64, np.int64]:\n if len(self.vdims) != len(other.vdims):\n raise ValueError('Triangles must have the same number of ' +\n 'columns')\n if len(self.kdims) != len(other.kdims):\n raise ValueError('Triangles must have the same number of ' +\n 'index')\n if len(self.vdims) == 1:\n other.vdims = np.array([None])\n # If broadcasting doesn't work, then try intersecting before\n # failure\n a, b = self.shape[-2:], other.shape[-2:]\n if not (a[0] == 1 or b[0] == 1 or a[0] == b[0]) and \\\n not 
(a[1] == 1 or b[1] == 1 or a[1] == b[1]):\n ddims = set(self.ddims).intersection(set(other.ddims))\n odims = set(self.odims).intersection(set(other.odims))\n # Need to set string vs int type-casting\n odims = pd.PeriodIndex(np.array(list(odims)),\n freq=self.origin_grain)\n obj = obj[obj.origin.isin(odims)][obj.development.isin(ddims)]\n other = other[other.origin.isin(odims)][other.development.isin(ddims)]\n obj.odims = np.sort(np.array(list(odims)))\n obj.ddims = np.sort(np.array(list(ddims)))\n other = other.values\n return obj, other\n\n def _arithmetic_cleanup(self, obj):\n ''' Common functionality AFTER arithmetic operations '''\n obj.values = obj.values * self.expand_dims(obj.nan_triangle())\n obj.values[obj.values == 0] = np.nan\n obj.vdims = [None] if len(obj.vdims) == 1 else obj.vdims\n return obj\n\n def __add__(self, other):\n obj, other = self._validate_arithmetic(other)\n obj.values = np.nan_to_num(obj.values) + np.nan_to_num(other)\n return self._arithmetic_cleanup(obj)\n\n def __radd__(self, other):\n return self if other == 0 else self.__add__(other)\n\n def __sub__(self, other):\n obj, other = self._validate_arithmetic(other)\n obj.values = np.nan_to_num(obj.values) - \\\n np.nan_to_num(other)\n return self._arithmetic_cleanup(obj)\n\n def __rsub__(self, other):\n obj, other = self._validate_arithmetic(other)\n obj.values = np.nan_to_num(other) - \\\n np.nan_to_num(obj.values)\n return self._arithmetic_cleanup(obj)\n\n def __len__(self):\n return self.shape[0]\n\n def __neg__(self):\n obj = copy.deepcopy(self)\n obj.values = -obj.values\n return obj\n\n def __pos__(self):\n return self\n\n def __mul__(self, other):\n obj, other = self._validate_arithmetic(other)\n obj.values = np.nan_to_num(obj.values)*other\n return self._arithmetic_cleanup(obj)\n\n def __rmul__(self, other):\n return self if other == 1 else self.__mul__(other)\n\n def __truediv__(self, other):\n obj, other = self._validate_arithmetic(other)\n obj.values = np.nan_to_num(obj.values)/other\n return self._arithmetic_cleanup(obj)\n\n def __rtruediv__(self, other):\n obj = copy.deepcopy(self)\n obj.values = other / self.values\n obj.values[obj.values == 0] = np.nan\n return obj\n\n def __eq__(self, other):\n if np.all(np.nan_to_num(self.values) ==\n np.nan_to_num(other.values)):\n return True\n else:\n return False\n","sub_path":"chainladder/core/dunders.py","file_name":"dunders.py","file_ext":"py","file_size_in_byte":3935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"57698213","text":"\"\"\"\nDecorator - logging functionality\n\"\"\"\n\nimport time\n\n\ndef my_logger(orig_func):\n import logging\n logging.basicConfig(filename='{}.log'.format(orig_func.__name__), level=logging.INFO)\n\n def wrapper(*args, **kwargs):\n print(args.__class__)\n logging.info(\n 'Ran with args: {}, and kwargs: {}'.format(args, kwargs))\n return orig_func(*args, **kwargs)\n\n return wrapper\n\n\ndef my_timer(orig_func):\n import time\n\n def wrapper(*args, **kwargs):\n print(args.__class__)\n t1 = time.time()\n result = orig_func(*args, **kwargs)\n t2 = time.time() - t1\n print('{} ran in: {} sec'.format(orig_func.__name__, t2))\n return result\n\n return wrapper\n\n\n@my_logger\n@my_timer\ndef display_info(name, age, wait_time=1):\n # wait wait_time seconds, till showing message\n time.sleep(wait_time)\n print('display_info ran with arguments ({}, {})'.format(name, age))\n\ndisplay_info = my_timer(display_info)\n\n# shows wrapper, instead orig\n# 
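without functools.wraps, the decorated function reports the wrapper's name:\n# 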
print(display_info.__name__)\n\ndisplay_info('Przemek', 25)\ndisplay_info('Przemek', 44)\ndisplay_info('Przemek', 42)\n","sub_path":"biblioteki/Pozostałe/[1] Dekoratory/zadania/[DEKORATORY] [2] logowanie informacji.py","file_name":"[DEKORATORY] [2] logowanie informacji.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"428787909","text":"\"\"\"\n def check_value(jdict, key):\n if isinstance(jdict, list):\n for element in jdict:\n check_value(element, key)\n elif isinstance(jdict, dict):\n if key in list(jdict.keys()):\n if jdict[key]:\n try:\n raise FoundException\n except FoundException as e:\n\n value.append(jdict[key])\n else:\n A = []\n B = {}\n\n for y in list(jdict.keys()):\n\n if type(jdict[y]) == type(A):\n for z in jdict[y]:\n check_value(z, key)\n\n elif type(jdict[y]) == type(B):\n check_value(jdict[y], key)\n return value\n\n value = []\n\n def check_value(jdict, key):\n if isinstance(jdict, list):\n for element in jdict:\n check_value(element, key)\n elif isinstance(jdict, dict):\n if key in list(jdict.keys()):\n if jdict[key]:\n try:\n raise FoundException\n except FoundException as e:\n\n value.append(jdict[key])\n else:\n A = []\n B = {}\n\n for y in list(jdict.keys()):\n\n if type(jdict[y]) == type(A):\n for z in jdict[y]:\n check_value(z, key)\n\n elif type(jdict[y]) == type(B):\n check_value(jdict[y], key)\n return value\n\n\n\"\"\"\n\n\n\nvalue = []\n\n\ndef check_value(jdict, key):\n if isinstance(jdict, list):\n for element in jdict:\n check_value(element, key)\n elif isinstance(jdict, dict):\n if key in list(jdict.keys()):\n value.append(jdict[key])\n else:\n A = []\n B = {}\n for y in list(jdict.keys()):\n if type(jdict[y]) == type(A):\n for z in jdict[y]:\n check_value(z, key)\n elif type(jdict[y]) == type(B):\n check_value(jdict[y], key)\n return value\nd = {\n \"code\": 0,\n \"msg\": \"success\",\n \"data\": {\n \"verifyCodeUrl\": \"http://zhonghuan-1257386775.cos.ap-beijing.myqcloud.com/chedui-web/2019/03/14/4c8b26df862b4fe6869c9174070d2e31.jpg\",\n \"verifyCodeId\": \"5092e3b6c89e45d9bfc052060aa55b2d\"\n }\n}\n\nimport requests\ndef main_test():\n url = \"http://62.234.197.128/basic-data/data/trailer/list\"\n\n payload = \"{\\n \\\"trailerPlateList\\\":[] ,\\n \\\"assignCodeList\\\": [],\\n \\\"page\\\": 1,\\n \\\"pageSize\\\": 10\\n}\"\n headers = {\n 'Content-Type': \"application/json\",\n 'X-Auth-Token': \"39d124367fcf40b3aad5d270c0aeb406\",\n 'cache-control': \"no-cache\",\n 'Postman-Token': \"b93cb8e2-93b6-42ee-ab9b-01ca845d13df\"\n }\n\n response = requests.request(\"POST\", url, data=payload, headers=headers)\n\n print(response.text)\n#print(check_value(d,'verifyCodeUrl'))\n#main_test()\nimport random\ndef GBK2312():\n head = random.randint(0xb0, 0xf7)\n body = random.randint(0xa1, 0xf9) # 在head区号为55的那一块最后5个汉字是乱码,为了方便缩减下范围\n val = f'{head:x}{body:x}'\n str = bytes.fromhex(val).decode('gb2312')\n return str\n\n\n\n\n# -*- coding: utf-8 -*-\nimport random\ndef generate_verification_code(len=6):\n ''' 随机生成6位的验证码 '''\n # 注意: 这里我们生成的是0-9A-Za-z的列表,当然你也可以指定这个list,这里很灵活\n # 比如: code_list = ['P','y','t','h','o','n','T','a','b'] # PythonTab的字母\n code_list = []\n for i in range(10): # 0-9数字\n code_list.append(str(i))\n for i in range(65, 91): # 对应从“A”到“Z”的ASCII码\n code_list.append(chr(i))\n for i in range(97, 123): #对应从“a”到“z”的ASCII码\n code_list.append(chr(i))\n myslice = random.sample(code_list, len) # 从list中随机获取6个元素,作为一个片断返回\n verification_code = ''.join(myslice) # 
list to string\n return verification_code\n\nimport string\ndef phone_num(num):\n all_phone_nums=[]\n num_start = ['134', '135', '136', '137', '138', '139', '150', '151', '152', '158', '159', '157', '182', '187', '188',\n '147', '130', '131', '132', '155', '156', '185', '186', '133', '153', '180', '189']\n for i in range(num):\n start = random.choice(num_start)\n end = ''.join(random.sample(string.digits,8))\n res = start+end\n all_phone_nums.append(res)\n return all_phone_nums\n # with open('phone_num.txt','w',encoding='utf-8') as fw:\n # fw.writelines(all_phone_nums)\nphone_num(1000)\n\n#print(generate_verification_code(len=6))\nprint(phone_num(10))","sub_path":"test_temp.py","file_name":"test_temp.py","file_ext":"py","file_size_in_byte":4835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"614585853","text":"import json\nimport argparse\nimport numpy as np\n\nimport torch\n\nfrom sklearn.cluster import KMeans\n\nfrom graph import load_graph\nfrom models import NegativeSamplingModel\nfrom learning import set_seed\nfrom model_utils import make_structural_model, train_negative_sampling\nfrom experiment_utils import create_experiment_params\n\n\nDEFAULT_DEVICE = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\nLINK_PREDICTION_OUTPUTS = 1\nEXPERIMENT_ID = 1\nNUM_CLUSTERS = 9\n\nKEY = 'Karate' \nGRAPH_PATH = 'data/{}/{}.edgelist'.format(KEY, KEY)\nOUTPUT_PATH = 'output/{}-result.jsonl'.format(KEY)\nCLUSTERS_OUTPUT_PATH = 'output/{}.clusters.tsv'.format(KEY)\nEXPERIMENT_PATH = 'configs/clusteringAtK2.json'\n\ndef clustering_experiment(graph_path, \n model_options=None,\n training_options=None,\n cluster_kwargs={'n_clusters': NUM_CLUSTERS},\n device=DEFAULT_DEVICE,\n experiment_id=EXPERIMENT_ID,\n use_seed=True):\n seed = None\n if use_seed:\n seed = experiment_id\n set_seed(seed)\n G = load_graph(graph_path)\n\n # Prepare the components and train the embedding model\n mapper, model = make_structural_model(G, model_options, device)\n trained_model = train_negative_sampling(G, model, model_options.neg_sampling_parameters, training_options, device)\n\n # Train the clustering model and compute the per-node cluster\n node_ids = {i: node['id'] for i, node in enumerate(G.vs)}\n embeddings = trained_model(G.vs, G).detach().numpy()\n clustering_model = KMeans(**cluster_kwargs).fit(embeddings)\n node_clusters = clustering_model.predict(embeddings)\n node_clusters = {node_ids[i]: int(value) for i, value in enumerate(node_clusters)}\n return trained_model, clustering_model, node_clusters\n\n\ndef parse_arguments():\n main_args = argparse.ArgumentParser()\n main_args.add_argument('-n', '--num-clusters', help='Number of experiment attempts to run per experiment configuration.', type=int, default=NUM_CLUSTERS)\n main_args.add_argument('-g', '--graph-path', help='Path to the graph edgelist file to be used.', type=str, default=GRAPH_PATH)\n main_args.add_argument('-o', '--output-path', help='Path to store the clustering results, as a tab separated file containing node ids and clusters.', type=str, default=CLUSTERS_OUTPUT_PATH)\n main_args.add_argument('-e', '--experiment-config', help='Path to the experiment configuration json specifying training and model parameters.', type=str, default=EXPERIMENT_PATH)\n return main_args.parse_args()\n\n\nif __name__ == '__main__':\n args = parse_arguments()\n\n num_clusters = args.num_clusters\n graph_path = args.graph_path\n output_path = args.output_path\n\n experiment_as_dict = {}\n with 
open(args.experiment_config, 'r') as f:\n        experiment_as_dict = json.load(f)\n\n    model_config, training_config, _, _, _ = create_experiment_params(experiment_as_dict)\n    embedding_model, clustering_model, node_clusters = clustering_experiment(graph_path, \n                                                                             model_config, \n                                                                             training_config, \n                                                                             cluster_kwargs={'n_clusters': num_clusters})\n\n    # node_clusters maps node id -> cluster index; write one tab-separated line per node\n    with open(output_path, 'w') as f:\n        for node_id, cluster in node_clusters.items():\n            f.write('{}\\t{}\\n'.format(node_id, cluster))\n\n\n\n\n","sub_path":"src/clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":3566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"4157623","text":"# ICA 3, Part 1\n\nplayers = int(input('Enter number of players: '))\nseasons = int(input('Enter number of seasons: '))\n\nfor x in range(players):\n    total = 0\n    name = input('Enter the name of a hockey player ')\n    for y in range(1, seasons+1):\n        goals = int(input(f'Enter goals scored in season #{y}: '))\n        total += goals \n    print(name, 'has scored', total, \"goals in the last\", seasons, \"seasons.\")\n\n# ICA 3, Part 2\n\nBASE_SIZE = 8\nfor row in range(BASE_SIZE, 0, -1):\n    for col in range(row+1):\n        print('*', end='')\n    print()\nprint()\n\nNUM_STEPS = 6\nfor row in range(NUM_STEPS, 0, -1):\n    for col in range(row-1):\n        print(' ', end='')\n    print('#')\nprint()\n\n# ICA 3, Part 3\n\nfor x in range(7):\n    for y in range(x):\n        print('$', end='')\n    for z in range(6-x):\n        print('#', end='')\n    print()","sub_path":"ICA/Ch4Pt3.py","file_name":"Ch4Pt3.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"91505880","text":"from parsers.AbstractTSPParser import AbstractTSPParser\nfrom tsp_profiles.TumorSampleProfile import TumorSampleProfile\nfrom tsp_profiles.ReadCount import ReadCount\nimport os.path\n\nclass DefaultTSPParser(AbstractTSPParser):\n    \n    def _parse_header(self, header):\n        print('parsing header...')\n        self._init_profile_list(header)\n        header = header.strip().split()\n        Len=len(header)\n        c=0\n        Name2Col={}\n        NameOrder=[]\t\n        while cRank among IIT in India : '+str(rank)+\r\n            '
NIRF Score : '+ str(score)+\r\n \"
\",icon=folium.Icon(color='red')))\r\n\r\nmap = folium.Map(Location =[20.0000,75.0000],zoom_start= 4)\r\n# latitude and longitude when you open your file from which location it will be visible\r\nmap.add_child(fg)\r\nmap.save('final.html')","sub_path":"23.create webmap using python ( 1080 X 1920 ).py","file_name":"23.create webmap using python ( 1080 X 1920 ).py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"2257721","text":"import numpy as np\nfrom scipy.ndimage import binary_fill_holes, label\nfrom skimage.morphology import binary_dilation, binary_erosion, disk\nfrom skimage import transform\n\ndef smooth_mask( _input,\n down_shape=-1, \n mode='classifier',\n thin_order=10,\n smooth_order=25 ):\n # print('Smoothing...')\n\n ### read in kwargs\n original_shape = _input.shape\n if down_shape != -1:\n shape = (int(_input.shape[0]*down_shape), int(_input.shape[1]*down_shape))\n else:\n shape = _input.shape\n\n _input = transform.resize(_input.astype(float), shape, order=0, preserve_range=True)\n _input = _input/np.max(_input)\n _input = 1.*(_input>np.min(_input))\n\n if mode == 'classifier':\n # smooth\n _input = binary_fill_holes(_input)\n\n # remove edge objects\n negative = binary_fill_holes(_input==0)\n _input = _input*negative\n if np.sum(_input) == 0:\n return _input.astype(np.uint8)\n\n # keep only largest object\n labeled_mask, cc_num = label(_input)\n _input = (labeled_mask == (np.bincount(labeled_mask.flat)[1:].argmax() + 1))\n _input = binary_fill_holes(_input)\n\n # thin out\n _input = binary_erosion(_input,disk(thin_order))\n if np.sum(_input) == 0:\n return _input.astype(np.uint8)\n\n # keep only largest object\n labeled_mask, cc_num = label(_input)\n _input = (labeled_mask == (np.bincount(labeled_mask.flat)[1:].argmax() + 1))\n\n # smooth\n _input = binary_erosion(_input,disk(smooth_order))\n _input = binary_dilation(_input,disk(smooth_order))\n\n _input = transform.resize(_input.astype(float), original_shape, order=0, preserve_range=True)\n\n return _input.astype(np.uint8)\n","sub_path":"morgana/ImageTools/segmentation/segment.py","file_name":"segment.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"285520216","text":"import sys\nsys.stdin = open('input.txt', 'r')\n\nfrom collections import deque\n\nfor tc in range(1, int(input())+1):\n N, M = map(int, input().split())\n V = [[] for _ in range(N+1)]\n for i in range(M):\n a, b = map(int, input().split())\n V[a].append(b)\n V[b].append(a)\n visit = [False]*(N+1)\n visit[1] = True\n queue = deque([1])\n ans = 0\n for _ in range(2):\n for _ in range(len(queue)):\n a = queue.popleft()\n for i in V[a]:\n if not visit[i]:\n ans += 1\n visit[i] = True\n queue.append(i)\n print('#{} {}'.format(tc, ans))","sub_path":"swea/D5/5521.py","file_name":"5521.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"323594477","text":"# -*- coding: utf-8 -*-\n#!/usr/bin/env python\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth import *\nfrom django.contrib.auth.models import User\nfrom models import Tipo\nfrom home.models import Academia_Profesor, Academia, Profesor\nfrom home.views import *\n\nmensaje = 
''\ncaracteres_validos_Usuarios = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_?0123456789!'\ncaracter_espacio = ' '\n# Create your views here.\ndef loginAdmins(request):\n\tglobal mensaje\n\tdiccionario = {'mensaje' : mensaje}\n\tmensaje = ''\n\treturn render(request, 'loginAdmins.html', diccionario)\n\ndef entrado(request):\n\tglobal mensaje\n\tusername = request.POST.get('usuario', '')\n\tpassword = request.POST.get('contrasenna', '')\n\tuser = authenticate(username=username, password=password)\n\tcaracteres = [\"'\", '\"']\n\tfor caracter in caracteres:\n\t\tif caracter in username or caracter in password:\n\t\t\tmensaje = \"SQL injection tu puta madre\"\n\t\t\treturn HttpResponseRedirect(\"/Gaby/AccesoAdmins\", {'mensaje':mensaje})\n\tif user is not None:\n\t\tif user.is_active:\n\t\t\tlogin(request, user)\n\t\t\ttipo = Tipo.objects.get(usuario=user)\n\t\t\tif tipo.tipo == 1:\n\t\t\t\treturn HttpResponseRedirect(\"/Gaby/Academia\")\n\t\t\telif tipo.tipo == 2:\n\t\t\t\treturn HttpResponseRedirect(\"/Gaby/Profesor\")\n\t\t\telif tipo.tipo == 3:\n\t\t\t\treturn HttpResponseRedirect(\"/Gaby/AccesoAlumnos\")\n\t\telse:\n\t\t\tdiccionario = {'mensaje' : \"NO estas activo\"} \n\t\t\treturn HttpResponseRedirect(\"/Gaby/Academia\", diccionario)\n\telse:\n\t\tmensaje = 'Usuario y contraseña incorrectos'\n\t\treturn HttpResponseRedirect(\"/Gaby/AccesoAdmins\", {'mensaje':mensaje})\n\t\n\treturn HttpResponseRedirect(\"/Gaby/AccesoAdmins\", {'mensaje':mensaje})\n\n\n\ndef registrar(request):\n\tglobal mensaje\n\tusuario1 = request.POST.get('username', '')\n\tusuarios = User.objects.all()\n\tbueno = False\n\tfor x in usuario1:\n\t\tif x in caracteres_validos_Usuarios:\n\t\t\tbueno = True\n\t\tif x in caracter_espacio:\n\t\t\tbueno = False\n\t\t\tbreak\n\n\tif bueno and len(usuario1)>=5:\n\t\tcaracteres = [\"'\", '\"']\n\t\tfor usuario in usuarios:\n\t\t\tif usuario.username == usuario1:\n\t\t\t\tmensaje = \"El usuario ya existe\"\n\t\t\t\treturn HttpResponseRedirect(\"/Gaby/AccesoAdmins\", {'mensaje':mensaje})\n\t\tcontrasenna1 = request.POST.get('password', '')\n\t\tcontrasenna = request.POST.get('password1', '')\n\n\t\tif contrasenna1 != contrasenna:\n\t\t\tmensaje = 'Las contraseñas no coinciden'\n\t\t\treturn HttpResponseRedirect(\"/Gaby/AccesoAdmins\", {'mensaje':mensaje})\n\n\t\tif len(contrasenna1) >= 6:\n\t\t\tfor caracter in caracteres:\n\t\t\t\tif caracter in usuario1 or caracter in contrasenna:\n\t\t\t\t\tmensaje = \"Por favor no incluyas \" + '\" ' + \"ó\" + \"' \" + \"en tu nombre de usuario\"\n\t\t\t\t\treturn HttpResponseRedirect(\"/Gaby/AccesoAdmins\", {'mensaje':mensaje})\n\n\t\t\tcorreo1 = request.POST.get('email', '')\n\t\t\tnombre1 = request.POST.get('first_name', '')\n\t\t\tbueno2 = False\n\t\t\tfor y in nombre1:\n\t\t\t\tif y in caracteres_validos_Usuarios:\n\t\t\t\t\tbueno2 = True\n\n\t\t\tif bueno2:\n\t\t\t\tuser = User.objects.create_user(usuario1, correo1, contrasenna1, first_name=nombre1)\n\t\t\t\ttipo = 1\n\t\t\t\tun_poco_mas = Tipo(tipo = tipo, usuario = user)\n\t\t\t\tun_poco_mas.save()\n\n\t\t\t\tacademiaNueva = Academia(academia=user)\n\t\t\t\tacademiaNueva.save()\n\n\t\t\t\tmensaje = 'Usuario creado exitosamente'\n\t\t\t\treturn HttpResponseRedirect(\"/Gaby/AccesoAdmins\", {'mensaje':mensaje})\n\t\t\telse:\n\t\t\t\tmensaje = 'Nombre de academia invalido'\n\t\t\t\treturn HttpResponseRedirect(\"/Gaby/AccesoAdmins\", {'mensaje':mensaje})\n\t\telse:\n\t\t\tmensaje = 'La contraseña debe tener al menos 6 caracteres'\n\t\t\treturn 
HttpResponseRedirect(\"/Gaby/AccesoAdmins\", {'mensaje':mensaje})\n\telse:\n\t\tif len(usuario1) < 5:\n\t\t\tmensaje = 'El nombre de usuario debe tener al menos 5 caracteres'\n\t\telse:\n\t\t\tmensaje = 'Nombre de usuario invalido'\n\t\treturn HttpResponseRedirect(\"/Gaby/AccesoAdmins\", {'mensaje':mensaje})\n\ndef salir(request):\n\tlogout(request)\n\treturn HttpResponseRedirect(\"/Gaby\")\n\n@login_required(login_url='/Gaby/AccesoAdmins')\ndef registrarProfe(request):\n\ttipo = mostrarTipo(request.user)\n\tif tipo != 1:\n\t\tif tipo == 2:\n\t\t\treturn redirect('/Gaby/Profesor')\n\t\telse:\n\t\t\treturn redirect('/Gaby/AccesoAlumnos') \n\tglobal mensaje\n\tusuario = request.POST.get('username', '')\n\tusuarios = User.objects.all()\n\tfor elemento in usuarios:\n\t\tif elemento.username == usuario:\n\t\t\tmensaje = \"El usuario ya existe\"\n\t\t\treturn HttpResponseRedirect('/Gaby/Academia/Maestros/Registrar/', {'mensaje':mensaje})\n\n\tcontrasenna = 'dreediGaby'\n\n\tcorreo = request.POST.get('email', '')\n\tnombre = request.POST.get('first_name', '')\n\tapellido = request.POST.get('last_name', '')\n\tuser = User.objects.create_user(usuario, correo, contrasenna, first_name=nombre, last_name=apellido)\n\ttipo = 2\n\tun_poco_mas = Tipo(tipo = tipo, usuario = user)\n\tun_poco_mas.save()\n\n\tprofesorNuevo = Profesor(profesor=user)\n\tprofesorNuevo.save()\n\n\tacademia = Academia.objects.get(academia=request.user.id)\n\n\trelacionProfeAcademia = Academia_Profesor(academia=academia, profesor=profesorNuevo)\n\trelacionProfeAcademia.save()\n\n\tmensaje = 'Profesor registrado exitosamente'\n\treturn HttpResponseRedirect('/Gaby/Academia/Maestros/Registrar/', {'mensaje':mensaje})\n\n@login_required(login_url='/Gaby/AccesoAdmins')\ndef maestrosRegistrar(request):\n\tglobal mensaje\n\ttipo = mostrarTipo(request.user)\n\tif tipo != 1:\n\t\tif tipo == 2:\n\t\t\treturn redirect('/Gaby/Profesor')\n\t\telse:\n\t\t\treturn redirect('/Gaby/AccesoAlumnos') \n\tdiccionario = {\n\t\t'mensaje': mensaje,\n\t}\n\tmensaje = ''\n\treturn render(request, 'academiaMaestrosRegistrar.html',diccionario)","sub_path":"userProfiles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"533152013","text":"# -*- coding: utf-8 -*-\n\nimport random\nimport re\nimport time\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\n\n\nclass DetailSpider():\n def __init__(self):\n self.broswer_client = webdriver.Firefox()\n self.request_url = \"https://items.alitrip.com/item.htm\"\n self.request_header = {\n \"accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\",\n \"accept-language\": \"zh-CN,zh;q=0.8,en-US;q=0.6,en;q=0.4,it;q=0.2\",\n \"cache-control\": \"no-cache\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.98 Safari/537.36\"\n }\n self.request_data = {\n \"id\": \"\",\n \"smToken\": \"\",\n \"smSign\": \"\",\n \"spm\": \"\"\n }\n self.spider_data = {\n \"shopName\": \"\",\n \"descSorce\": 4.5,\n \"serverSorce\": 4.5,\n \"logisticsScore\": 4.5,\n \"title\": \"\",\n \"company\": \"\",\n \"location\": \"\",\n \"fromCity\": \"\",\n \"destCity\": \"\",\n \"price\": \"\",\n \"minPrice\": 0,\n \"maxPrice\": 0,\n \"sellCount\": 0,\n \"gradeAvg\": 4.5,\n \"rateTotal\": 0,\n \"days\": 1,\n \"promise\": \"\"\n }\n self.default_spider_data = {\n \"shopName\": \"\",\n \"descSorce\": 4.5,\n 
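# NOTE: these defaults mirror spider_data above; reset_data() copies them back before each new item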
\"serverSorce\": 4.5,\n \"logisticsScore\": 4.5,\n \"title\": \"\",\n \"company\": \"\",\n \"location\": \"\",\n \"fromCity\": \"\",\n \"destCity\": \"\",\n \"price\": \"\",\n \"minPrice\": 0,\n \"maxPrice\": 0,\n \"sellCount\": 0,\n \"gradeAvg\": 4.5,\n \"rateTotal\": 0,\n \"days\": 1,\n \"promise\": \"\"\n }\n\n def __set_request_data(self, item_id, sm_token=\"\", sm_sign=\"\", spm=\"\"):\n self.request_data[\"id\"] = item_id\n self.request_data[\"smToken\"] = sm_token\n self.request_data[\"smSign\"] = sm_sign\n self.request_data[\"spm\"] = spm\n\n def set_sm_param(self, sm_token, sm_sign):\n self.__set_request_data(self.request_data[\"id\"], sm_token, sm_sign, self.request_data[\"spm\"])\n\n def set_item_id(self, item_id):\n self.__set_request_data(item_id, self.request_data[\"smToken\"], self.request_data[\"smSign\"], self.request_data[\"spm\"])\n\n def __get_url_params(self):\n data = self.request_data\n return \"id=%s&smToken=%s&smSign=%s&spm=%s\" % (data[\"id\"], data[\"smToken\"], data[\"smSign\"], data[\"spm\"])\n\n def __get_full_url(self):\n return self.request_url + \"?\" + self.__get_url_params()\n\n def reset_data(self):\n for data in self.spider_data:\n self.spider_data[data] = self.default_spider_data[data]\n\n def __parse_page(self, page):\n detail_page = BeautifulSoup(page, \"html.parser\")\n # print(type(detail_page))\n shop_name_tag = detail_page.find(\"a\", {\"class\": \"slogo-shopname\"})\n h_shopcard_scores = detail_page.select(\".h-shopcard-scores a\")\n h_shopcard_seller = detail_page.find(\"div\", {\"class\": \"h-shopcard-mid\"}).find(\"dl\")\n detail_hd = detail_page.find(\"div\", {\"class\": \"detail-hd\"}).find(\"h1\")\n price = detail_page.find(\"span\", {\"class\": \"detail-price J_PriceWrap\"})\n sell_count = detail_page.find(\"em\", {\"class\": \"ml J_SellCount\"})\n grade = detail_page.find(\"em\", {\"class\": \"J_Grade\"})\n comment = detail_page.find(\"span\", {\"class\": \"J_Comment\"})\n other_info = detail_page.select(\"#J_ItemPropWrap > dd\")\n if len(other_info) <= 1:\n return {}\n store_info = []\n for info in h_shopcard_seller:\n temp = str(info.string).strip()\n if temp != \"None\" and temp != \"\":\n store_info.append(temp[temp.find(\":\") + 1:])\n\n self.spider_data[\"shopName\"] = list(shop_name_tag)[0]\n self.spider_data[\"descSorce\"] = str(h_shopcard_scores[0].text).strip()\n self.spider_data[\"serverSorce\"] = str(h_shopcard_scores[1].text).strip()\n self.spider_data[\"logisticsScore\"] = str(h_shopcard_scores[2].text).strip()\n self.spider_data[\"company\"] = store_info[-2]\n self.spider_data[\"location\"] = store_info[-1]\n self.spider_data[\"title\"] = str(detail_hd.text).strip().split(\"\\n\")[0]\n self.spider_data[\"price\"] = price.text\n if price.text != \"\":\n price_list = str(price.text).split(\"~\")\n if len(price_list) == 1:\n self.spider_data[\"minPrice\"] = self.spider_data[\"maxPrice\"] = int(price_list[0])\n elif len(price_list) == 2:\n self.spider_data[\"minPrice\"] = float(price_list[0])\n self.spider_data[\"maxPrice\"] = float(price_list[1])\n\n self.spider_data[\"sellCount\"] = int(sell_count.text)\n self.spider_data[\"gradeAvg\"] = float(grade.text)\n self.spider_data[\"rateTotal\"] = int(comment.text)\n from_city = other_info[0].select(\"span\")\n # print(from_city)\n for city in from_city:\n self.spider_data[\"fromCity\"] = self.spider_data[\"fromCity\"] + city.text.strip() + \" \"\n dest_city = other_info[1].select(\"span\")\n # print(dest_city)\n for city in dest_city:\n self.spider_data[\"destCity\"] = 
self.spider_data[\"destCity\"] + city.text.strip() + \" \"\n\n        self.spider_data[\"days\"] = int(re.findall(\"[0-9]+\", other_info[2].text.strip())[0])\n\n        promise_info = other_info[3:]\n        # print(promise_info)\n        for promise in promise_info:\n            # print(promise.text.strip())\n            self.spider_data[\"promise\"] = self.spider_data[\"promise\"] + \" \" + promise.text.strip().lstrip()\n        self.spider_data[\"promise\"] = \" \".join(self.spider_data[\"promise\"].split(\"\\n\")).strip()\n        # print(self.spider_data)\n\n    def sleep(self, sleep_time, period=10):\n        while sleep_time > 0:\n            print(sleep_time)\n            time.sleep(period)\n            sleep_time = sleep_time - period\n\n    def login(self):\n        # type in the account name and password\n        # on the page being requested, the 'id' of the account input box is username and the 'name' of the password input box is password\n        time.sleep(random.randint(10, 15))\n        # firefox_login.find_element_by_id('J_Quick2Static').click()\n        # firefox_login.find_element_by_class_name(\"login-switch\").click()\n        self.broswer_client.find_element_by_id('TPL_username_1').clear()\n        self.broswer_client.find_element_by_id('TPL_username_1').send_keys(u'5541')\n        self.broswer_client.find_element_by_id('TPL_password_1').clear()\n        self.broswer_client.find_element_by_id('TPL_password_1').send_keys(u'455')\n        time.sleep(random.randint(10, 15))\n        self.broswer_client.find_element_by_id('J_SubmitStatic').click()\n        time.sleep(random.randint(2, 4))\n        # current_url = self.broswer_client.current_url\n\n    def reboot(self):\n        # pause 50s so the account can be logged in manually\n        time.sleep(50)\n        self.broswer_client.quit()\n        self.broswer_client = webdriver.Firefox()\n\n    def spider(self, item_id):\n        self.reset_data()\n        # print(\"data after reset\")\n        # print(self.spider_data)\n        self.set_item_id(item_id)\n        self.broswer_client.get(self.__get_full_url())\n        page = \"\"\n        try:\n            page = self.broswer_client.page_source\n            self.__parse_page(page)\n        except Exception as err:\n            print(err)\n            current_url = self.broswer_client.current_url\n            if current_url.find(\"login\") > 0:\n                self.reboot()\n                self.sleep(120, 20)\n            return {}\n\n        print(\"parsed data\")\n        print(self.spider_data)\n        if self.spider_data[\"shopName\"] == \"\":\n            return {}\n        return self.spider_data\n\n# detail_spider = DetailSpider()\n# detail_spider.spider(\"528037383954\")\n# # print(\"2199~2498\".split(\"~\"))\n","sub_path":"spider/DetailData.py","file_name":"DetailData.py","file_ext":"py","file_size_in_byte":7974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"490315519","text":"import threading\nimport time\n\n\ndef wants_to_eat(philosopher, name):\n    pick_left = philosopher.left.acquire()\n    if pick_left:\n        print(f\"{name} picked up the left fork\")\n        pick_right = philosopher.right.acquire()\n        if pick_right:\n            print(f\"{name} picked up the right fork\")\n            print(f\"philosopher {name} starts eating\")\n            time.sleep(2)\n            philosopher.right.release()\n    philosopher.left.release()\n\n\nclass DiningPhilosophers(object):\n    def __init__(self, left, right):\n        self.left = left\n        self.right = right\n\n\nif __name__ == '__main__':\n    r_lock1 = threading.RLock()\n    r_lock2 = threading.RLock()\n    r_lock3 = threading.RLock()\n    r_lock4 = threading.RLock()\n    r_lock5 = threading.RLock()\n    \n    philosopher1 = DiningPhilosophers(r_lock5, r_lock1)\n    philosopher2 = DiningPhilosophers(r_lock1, r_lock2)\n    philosopher3 = DiningPhilosophers(r_lock2, r_lock3)\n    philosopher4 = DiningPhilosophers(r_lock3, r_lock4)\n    philosopher5 = DiningPhilosophers(r_lock4, r_lock5)\n\n    run1 = threading.Thread(target=wants_to_eat, args=(philosopher1, \"philosopher1\"))\n    run2 = threading.Thread(target=wants_to_eat, args=(philosopher2, \"philosopher2\"))\n    run3 = 
threading.Thread(target=wants_to_eat, args=(philosopher3, \"philosopher3\"))\n run4 = threading.Thread(target=wants_to_eat, args=(philosopher4, \"philosopher4\"))\n run5 = threading.Thread(target=wants_to_eat, args=(philosopher5, \"philosopher5\"))\n\n run1.start()\n run2.start()\n run3.start()\n run4.start()\n run5.start()\n\n","sub_path":"Week03/phd.py","file_name":"phd.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"71916997","text":"##Autora: Carolina de Farias\n##Professor: Robinson Pizzio,Dr Eng.\n##Disciplina de sinais e sistemas\n##Esse script contém o cálculo da serie de fourier das funções \"c\" e \"e\" propostas pelo professor\n\n\nfrom sympy import *\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cmath\nimport time\nimport warnings\n\nwarnings.filterwarnings('ignore')\nglobal somaDn\n\nsoma = 0\npositivos = []\n\npasso = 100\n\nvaloresFase =[]\n\neixosY = []\neixosX = []\n\neixosYFourier = []\n\nsomaFourier = 0\n\nt = Symbol('t')\n\nx_ = np.linspace(-3,3, 20)\n\n\ndef plotarGrafico(eixoX, valores, xlabel, ylabel, titulo, eixoY = \"\", valoresY= \"\"):\n if(titulo == 'Fourier'):\n labelX, =plt.plot(eixoX, valores, label= 'Função original')\n labelY, = plt.plot(eixoY, valoresY , label='Função Fourier')\n plt.legend(handles=[labelX, labelY])\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.title(titulo)\n plt.show()\n elif(titulo == 'Fourier Degrau'):\n labelX, = plt.step(np.array(eixoX), valores, label='Função original')\n labelY, = plt.step(np.array(eixoY), valoresY, label='Função Fourier')\n plt.legend(handles=[labelX, labelY])\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.title(titulo)\n plt.show()\n else:\n plt.stem(eixoX, valores, linefmt='-')\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.title(titulo)\n plt.show()\n\n\nplt.style.use('ggplot')\n\ndef definePotenciaSinal(f1,f2):\n potenciaSinal = 0\n somaDn = 0\n for har in range(-n, n + 1):\n valorIntegral = nsimplify(Integral((((f1) * exp(complex(0, -1) * wZero * har * t)) / T), (t, -2, 0))).doit().evalf() + \\\n nsimplify(Integral((((f2) *exp(complex(0, -1) * wZero * har * t) / T)), (t, 0, 1))).doit().evalf()\n if har == 0:\n d0 = (valorIntegral)**2\n elif (har >= 1 and har < n):\n dN = abs(valorIntegral)**2\n somaDn += 2*dN\n elif har ==n:\n dN = abs((valorIntegral)**2)\n somaDn += 2 * dN\n potenciaSinal= d0 + somaDn\n return potenciaSinal\n\ndef definePotenciaSinalFuncao2(f1,f2):\n potenciaSinal = 0\n somaDn = 0\n for har in range(-n, n + 1):\n valorIntegral = nsimplify(exp(complex(0, -1) * har * wZero * t) * Integral((exp(complex(0, -1) * har * wZero * t)) / T,(t, 0, 6)).doit().evalf())\n if har == 0:\n d0 = (valorIntegral)**2\n elif (har >= 1 and har < n):\n dN = abs(valorIntegral)**2\n somaDn += 2*dN\n elif har ==n:\n dN = abs((valorIntegral)**2)\n somaDn += 2 * dN\n potenciaSinal= d0 + somaDn\n return potenciaSinal\n\ndef definePotenciaSinalOriginal(f1,f2):\n valorIntegral = Integral((((f1)**2) / T), (t, -(T/2), T/2)).doit().evalf() +\\\n Integral((((f2)**2 /T)), (t, -(T/2), T/2)).doit().evalf()\n\n return valorIntegral\n\n\ndef defineFase(f1, f2):\n valoresFase = []\n for har in range(-n, n + 1):\n valorIntegral = nsimplify(Integral((((f1) * exp(complex(0, -1) * wZero * har * t)) / T),\n (t, -2, 0))).doit().evalf() + nsimplify(Integral(\n (((f2) * exp(complex(0, -1) * wZero * har * t)) / T), (t, 0, 1))).doit().evalf()\n valorConvertidoComplexToPolar = cmath.polar(valorIntegral)\n\n 
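# cmath.polar() returns (modulus, phase); the assignment below keeps the phase angle in radians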
valorConvertidoImaginario = valorConvertidoComplexToPolar[1]\n\n valoresFase.append(valorConvertidoImaginario)\n return valoresFase\n\ndef defineFaseFuncao2():\n valoresFase = []\n for har in range(-n, n + 1):\n valorIntegral = (exp(-har*wZero*1j)*1j - exp(-har*wZero*2j)*1j - exp(-har*wZero*4j)*1j + exp(-har*wZero*5j)*1j)/(6*har*wZero)\n\n valorConvertidoComplexToPolar = cmath.polar(valorIntegral)\n\n valorConvertidoImaginario = valorConvertidoComplexToPolar[1]\n\n valoresFase.append(valorConvertidoImaginario)\n return valoresFase\n\ndef defineAmplitude(f1, f2):\n valoresAmplitude = []\n for har in range(-n, n + 1):\n valorIntegral = nsimplify(Integral((((f1) * exp(complex(0, -1) * wZero * har * t)) / T),\n (t, -2, 0))).doit().evalf() + nsimplify(Integral(\n (((f2) * exp(complex(0, -1) * wZero * har * t)) / T), (t, 0, 1))).doit().evalf()\n valorConvertidoComplexToPolar = cmath.polar(valorIntegral)\n\n valorConvertidoReal = valorConvertidoComplexToPolar[0]\n\n valoresAmplitude.append(valorConvertidoReal)\n return valoresAmplitude\n\ndef defineAmplitudeFuncao2():\n valoresAmplitude = []\n for har in range(-n, n + 1):\n\n valorIntegral = (exp(-har*wZero*1j)*1j - exp(-har*wZero*2j)*1j - exp(-har*wZero*4j)*1j + exp(-har*wZero*5j)*1j)/(6*har*wZero)\n\n valorConvertidoComplexToPolar = cmath.polar(valorIntegral)\n\n valorConvertidoReal = valorConvertidoComplexToPolar[0]\n\n valoresAmplitude.append(valorConvertidoReal)\n return valoresAmplitude\n\ndef somaHarmonicas(tempo,somaHarmonica, f1,f2):\n start_time = time.time()\n for har in range(-n, n + 1):\n\n valorIntegral = nsimplify(Integral((((f1) * exp(complex(0, -1) * wZero * har * t)) / T), (t, -2, 0))).doit().evalf() + nsimplify(Integral((((f2) * exp(complex(0, -1) * wZero * har * t)) / T), (t, 0, 1))).doit().evalf()\n\n valorFuncao = valorIntegral * exp(complex(0, 1) * har * wZero * tempo)\n\n printPositivos(har, valorIntegral)\n\n somaHarmonica += valorFuncao\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return cmath.polar(somaHarmonica)[0]\n\ndef somaHarmonicasFuncao2(tempo,somaHarmonica):\n start_time = time.time()\n for har in range(-n, n + 1):\n if(har == 0):\n continue\n valorIntegral = (exp((-har) * wZero * complex(0,1)) * complex(0,1) - exp((-har) * wZero * complex(0,2)) * complex(0,1) - exp((-har) * wZero * complex(0,4)) * complex(0,1) + exp((-har) * wZero * complex(0,5)) * complex(0,1)) / (6 * har * wZero)\n valorFuncao = valorIntegral.as_real_imag()[1] * exp(complex(0, 1) * har * wZero * tempo)\n\n somaHarmonica += valorFuncao\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return somaHarmonica.as_real_imag()[1]\n\ndef defineFourier(lista,somaHarmonica, f1, f2):\n for i in range(0, passo):\n eixoX = lista[i]\n eixosX.append(eixoX)\n somaH = somaHarmonicas(lista[i],somaHarmonica,f1, f2)\n eixosYFourier.append(somaH)\n\ndef defineFourierFuncao2(lista,somaHarmonica):\n for i in range(0, passo):\n eixoX = lista[i]\n eixosX.append(eixoX)\n somaH = somaHarmonicasFuncao2(lista[i],somaHarmonica)\n eixosYFourier.append(somaH)\n\ndef printPositivos(har, valorIntegral):\n if (har > 0):\n positivo = {\n 'valor': valorIntegral,\n 'expoente': har\n }\n print(\"\\nValor Dn: \" + str(positivo['valor']) + \"| Expoente = \" + str(positivo['expoente']) + \"\\n\")\n\n\ndef mostraOpcoes(lista, somaHarmonica,f1, f2, n):\n print(\"Escolha as seguintes opções\\n\")\n print(\"1- Amplitude\\n\")\n print(\"2- Fase\\n\")\n print(\"3- Espectro de frequência\\n\")\n print(\"4- Série de Fourier\\n\")\n print(\"5- Valor da distorção 
harmônica total?\\n\")\n\n resposta = input(\"Qual grafico você gostaria de visualizar?\")\n\n if resposta == \"1\":\n x = np.linspace(-n, n, (n * 2) + 1)\n valoresAmplitude = defineAmplitude(f1 , f2)\n plotarGrafico(x, valoresAmplitude, 'n', '|Dn|', 'Amplitude')\n elif resposta == \"2\":\n # Grafico fase\n x = np.linspace(-n, n, (n * 2) + 1)\n valoresFase = defineFase(f1,f2)\n plotarGrafico(x, valoresFase, 'n', 'Dn((rad))', 'Fase')\n elif resposta == \"3\":\n x = np.linspace(-n, n, (n * 2) + 1)\n valoresAmplitude = defineAmplitude(f1, f2)\n valoresFase = defineFase(f1, f2)\n # Gráfico amplitude\n plotarGrafico(x, valoresAmplitude, 'nw', '|Dn|', 'Amplitude')\n # Grafico fase\n plotarGrafico(x, valoresFase, 'nw', 'Dn((rad))', 'Fase')\n elif resposta == \"4\":\n eixoX = [-6, -5, -3, -2, 0, 1, 3, 4, 6]\n eixoY = [2, 0, 2, 0, 2, 0, 2, 0, 2]\n defineFourier(lista,somaHarmonica,f1,f2)\n plotarGrafico(eixoX, eixoY, 'Funcao Original', 'f(t)', 'Fourier',eixosX,eixosYFourier)\n\n elif resposta == \"5\":\n potenciaSinalFourier = definePotenciaSinal(f1,f2)\n potenciaSinalOriginal = definePotenciaSinalOriginal(f1,f2)\n\n distorcao = (potenciaSinalFourier / potenciaSinalOriginal) * 100\n\n print(\"A distorção harmonica é \" + str(distorcao) + \"%\")\n else:\n print(\"Não existe opção para entrada informada\")\n\ndef mostraOpcoesFuncao2(lista, somaHarmonica,n):\n print(\"Escolha as seguintes opções\\n\")\n print(\"1- Amplitude\\n\")\n print(\"2- Fase\\n\")\n print(\"3- Espectro de frequência\\n\")\n print(\"4- Série de Fourier\\n\")\n\n resposta = input(\"Qual grafico você gostaria de visualizar?\")\n\n if resposta == \"1\":\n x = np.linspace(-n, n, (n * 2) + 1)\n valoresAmplitude = defineAmplitudeFuncao2()\n plotarGrafico(x, valoresAmplitude, 'n', '|Dn|', 'Amplitude','','')\n elif resposta == \"2\":\n # Grafico fase\n x = np.linspace(-n, n, (n * 2) + 1)\n valoresFase = defineFaseFuncao2()\n plotarGrafico(x, valoresFase, 'n', 'Dn((rad))', 'Fase')\n elif resposta == \"3\":\n x = np.linspace(-n, n, (n * 2) + 1)\n valoresAmplitude = defineAmplitudeFuncao2()\n valoresFase = defineFaseFuncao2()\n # Gráfico amplitude\n plotarGrafico(x, valoresAmplitude, 'nw', '|Dn|', 'Amplitude')\n # Grafico fase\n plotarGrafico(x, valoresFase, 'nw', 'Dn((rad))', 'Fase')\n elif resposta == \"4\":\n eixoX = np.array([-12,-11,-10,-9,-8 ,-7 ,-6, -5 ,-4 ,-3, -2 ,-1 ,0 ,1 ,2 ,3, 4 ,5 ,6 ,7 ,8 ,9 ,10, 11, 12 ])\n eixoY = np.array([0,1,1,0,-1 ,-1 , 0 , 1 , 1 , 0 ,-1 ,-1 , 0 , 1 , 1 , 0 ,-1 ,-1 , 0 ,1 ,1 , 0 , -1, -1, 0, ])\n defineFourierFuncao2(lista, somaHarmonica)\n plotarGrafico(eixoX, eixoY, 'Funcao Original', 'f(t)', 'Fourier Degrau',eixosX,eixosYFourier)\n else:\n print(\"Não existe opção para entrada informada\")\n\nwhile(True):\n print(\"Qual função você deseja visualizar?\\n\")\n print(\"Digite 1 para a função (c)\\n\")\n print(\"Digite 2 para a função (e)\\n\")\n\n res = input(\"\")\n\n if res == \"1\":\n T = 3\n wZero = 2 * pi / T\n f1 = t+2\n f2 = -2*t+2\n lista = np.linspace(-3 * 2, 3*2, passo)\n somaHarmonica = 0\n\n n = int(input(\"Digite harmonica: \"))\n mostraOpcoes(lista, somaHarmonica, f1,f2, n)\n\n\n elif res == \"2\":\n T = 6\n wZero = 2 * pi / T\n somaHarmonica = 0\n\n lista = np.linspace(-6*2, 6*2, passo)\n\n n = int(input(\"Digite harmonica: \"))\n mostraOpcoesFuncao2(lista, somaHarmonica,n)\n\n\n else:\n print(\"Resposta inválida!\")\n\n#x_ = np.linspace(-3,3, 100)\n\n#alterar\n#lista = np.linspace(-2*3.14, 2*3.14, 100)\n#lista = np.linspace(-2*2, 2*2, 100)\n#lista = np.linspace(-2*3, 2*3, 
100)\n\nprint(\"Escolha as seguintes opções\\n\")\nprint(\"1- Amplitude\\n\")\nprint(\"2- Fase\\n\")\nprint(\"3- Espectro de frequência\\n\")\nprint(\"4- Série de Fourier\\n\")\nprint(\"5- Valor da distorção harmônica total?\\n\")\nresposta = input(\"Qual grafico você gostaria de visualizar?\")\n\n","sub_path":"Descubra o Python/main carol ifsc fft.py","file_name":"main carol ifsc fft.py","file_ext":"py","file_size_in_byte":11306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"532870159","text":"import time\n\nfrom datetime import datetime, timedelta\ntim = time.time()\n\nt = time.localtime()\n# time.struct_time(tm_year=2018, tm_mon=7, tm_mday=15, tm_hour=16, tm_min=26, tm_sec=39, tm_wday=6, tm_yday=196, tm_isdst=0)\n# \n\nnow_time = \"%d-%02d-01\"%(t.tm_year,t.tm_mon)\n# 2018-07-01\nnow_time_more = datetime.strptime(now_time,\"%Y-%m-%d\")\n# 2018-07-01 00:00:00\n\ntoday_time = \"%d-%02d-%02d\"%(t.tm_year,t.tm_mon,t.tm_mday)\n# 2018-07-15 哪天写的就是哪天的日期\ntoday_begin_time = datetime.strptime(today_time,\"%Y-%m-%d\")\n# 2018-07-15 00:00:00\n\n\ntoday_begin_time = today_begin_time - timedelta(days=0)\ntoday_end_time = today_begin_time - timedelta(days=-1)\n\nprint(tim)\n\nprint(t)\nprint(now_time)\nprint(today_time)\nprint(now_time_more)\nprint(today_begin_time,\"------\",today_end_time)\n\n","sub_path":"information_web/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"406862355","text":"#!/bin/python3\n\nimport sys\n\n\nn = int(input().strip())\nphone_book = {}\nfor i in range(0, n):\n name, phone_number = input().split(' ')\n phone_book[name] = phone_number\n\nwhile True:\n try:\n query = input().strip()\n if query in phone_book:\n print('%s=%s' % (query, phone_book[query]))\n else:\n print('Not found')\n except:\n break","sub_path":"python/30-days-of-code/day-8.py","file_name":"day-8.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"555058812","text":"import schedule\r\nimport time\r\nimport sys\r\nimport os\r\n\r\nfrom datetime import datetime\r\n\r\ndef run():\r\n\ttry:\r\n\t\tuser = sys.argv[1]\r\n\t\tprint(f'Execute at: {datetime.now()}')\r\n\t\tos.system(f'check_user.py {user}')\r\n\texcept Exception as e:\r\n\t\tprint(f'Error: {e}')\r\n\r\ndef main():\r\n\trun()\r\n\tschedule.every(1).minutes.do(run)\r\n\r\n\twhile True:\r\n\t\tschedule.run_pending()\r\n\t\ttime.sleep(1)\r\n\r\nif __name__ == '__main__':\r\n\tmain()","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"576375235","text":"# coding:utf-8\n\n\nimport cPickle as pickle\nimport numpy as np\nimport os\nimport scipy.misc\nimport random\nimport shutil\n\n\nimages_dir = \"./nomal_images/\"\ntrain_image_dir = \"./train_images/\"\ntest_image_dir = \"./test_images/\"\nid_dict_path = \"./76587_id_vec.p\"\nin_shape = (48, 48, 3)\n\n\ndef load_image(path):\n img = scipy.misc.imread(path, mode=\"RGB\")\n if img.shape != in_shape:\n return\n return img\n\ndef get_dirfile(dir_path):\n filenames = os.listdir(dir_path)\n return filenames\n\ndef load_id_dict(id_dict_path):\n with open(id_dict_path, \"rb\") as f:\n id_dict = pickle.load(f)\n print(id_dict_path + \"をロードしました.\")\n return id_dict\n\ndef get_yet(all_files, 
traind_files):\n    set_yet = set(all_files) - set(traind_files)\n    yet_files = list(set_yet) \n    return yet_files\n\ndef init():\n    for i in [train_image_dir, test_image_dir]:\n        if not os.path.isdir(i):\n            os.makedirs(i)\n\ndef mk_train_test():\n    init()\n    img_files = get_dirfile(images_dir)\n    usable = []\n\n    \"\"\" remove unusable images \"\"\"\n    # drop pictures that cannot be loaded (not RGB or wrong shape)\n    for i in img_files:\n        img = load_image(images_dir + i)\n        if img is None:\n            continue\n        else:\n            usable.append(i)\n\n    print(\"img_files\", len(img_files))\n    print(\"usable picture\", len(usable))\n    \n    # keep only the pictures whose user id appears in the id_dict used for this run\n    id_vec = load_id_dict(id_dict_path)\n    new_usable = []\n\n    for i in usable:\n        tmp_id = i.split(\".\")[0]\n        usr_id = int(tmp_id.split(\"_\")[0])\n\n        if usr_id in id_vec.keys():\n            new_usable.append(i)\n        else:\n            pass\n    \n    print(\"new_usable(filter:id_dict)\", len(new_usable))\n\n    #trainf = random.sample(new_usable, len(new_usable) * 0.8)\n    #testf = get_yet(new_usable, trainf)\n\n    for i in new_usable:\n        shutil.copy2(images_dir + i, train_image_dir + i)\n    #for i in testf:\n    #    shutil.copy2(images_dir + i, test_image_dir + i)\n\nif __name__ == \"__main__\":\n    mk_train_test()\n","sub_path":"mk_dataset.py","file_name":"mk_dataset.py","file_ext":"py","file_size_in_byte":2075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"287809975","text":"#\n# CORE\n# Copyright (c)2010-2012 the Boeing Company.\n# See the LICENSE file included in this distribution.\n#\n''' Sample user-defined service.\n'''\n\nimport os\n\nfrom core.service import CoreService, addservice\nfrom core.misc.ipaddr import IPv4Prefix, IPv6Prefix\n\nclass TrafficDump(CoreService):\n    ''' This is a sample user-defined service.\n    '''\n    # a unique name is required, without spaces\n    _name = \"TrafficDump\"\n    # you can create your own group here\n    _group = \"WAN_BGP\"\n    # list of other services this service depends on\n    _depends = ()\n    # per-node directories\n    _dirs = ()\n    # generated files (without a full path this file goes in the node's dir,\n    # e.g. /tmp/pycore.12345/n1.conf/)\n    _configs = ('startdump.sh', )\n    # this controls the starting order vs other enabled services\n    _startindex = 10\n    # list of startup commands, also may be generated during startup\n    _startup = ('sh startdump.sh',)\n    # list of shutdown commands\n    _shutdown = ('killall tcpdump', )\n\n    @classmethod\n    def generateconfig(cls, node, filename, services):\n        ''' Start TCPDUMP on interface eth0. 
Returns a string that will be written to filename,\n or sent to the GUI for user customization.\n '''\n cfg = \"#!/bin/sh\\n\"\n cfg += \"# auto-generated by TrafficDump (trafficdump.py)\\n\"\n for ifc in node.netifs():\n cfg += 'echo \"Node %s has interface %s\"\\n' % (node.name, ifc.name)\n # here we do something interesting\n cfg += \"\\n\".join(map(cls.subnetentry, ifc.addrlist))\n break\n cfg += \"\\n\"\n cfg += \"# Select the interface(s) to dump\\n\"\n cfg += \"tcpdump -U -i eth0 -w - >/home/vagrant/output/traffic-%s-eth0.pcap & \\n\" % (node.name)\n cfg += \"#tcpdump -U -i eth1 -w - >/home/vagrant/output/traffic-%s-eth1.pcap & \\n\" % (node.name)\n cfg += \"#tcpdump -U -i eth2 -w - >/home/vagrant/output/traffic-%s-eth2.pcap & \\n\" % (node.name)\n cfg += \"#tcpdump -U -i eth3 -w - >/home/vagrant/output/traffic-%s-eth3.pcap & \\n\" % (node.name)\n cfg += \"#tcpdump -U -i eth4 -w - >/home/vagrant/output/traffic-%s-eth4.pcap & \\n\" % (node.name)\n cfg += \"#tcpdump -U -i eth5 -w - >/home/vagrant/output/traffic-%s-eth5.pcap & \\n\" % (node.name)\n cfg += \"#tcpdump -U -i eth6 -w - >/home/vagrant/output/traffic-%s-eth6.pcap & \\n\" % (node.name)\n return cfg\n\n @staticmethod\n def subnetentry(x):\n ''' Generate a subnet declaration block given an IPv4 prefix string\n for inclusion in the config file.\n '''\n if x.find(\":\") >= 0:\n # this is an IPv6 address\n return \"\"\n else:\n net = IPv4Prefix(x)\n return 'echo \" network %s\"' % (net)\n\n# this line is required to add the above class to the list of available services\naddservice(TrafficDump)\n","sub_path":"Lab_6/data/core/myservices/trafficdump.py","file_name":"trafficdump.py","file_ext":"py","file_size_in_byte":2870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"187967310","text":"import docutils\nimport docutils.nodes\nimport docutils.utils\nimport pybtex.database\nimport pybtex.plugin\nimport pytest\nfrom pybtex.richtext import HRef, Tag, Text\n\nfrom pybtex_docutils import Backend\n\n\ndef render_str(richtext):\n return \"\".join(str(node) for node in richtext.render(Backend()))\n\n\n# may remove this test when new pybtex is out\ndef test_text():\n assert Backend().format_text(\"hi\") == Backend().format_str(\"hi\")\n\n\ndef test_tag():\n tag = Tag(\"em\", \"hello\")\n assert render_str(tag) == \"hello\"\n\n\ndef test_tag_text():\n tag = Tag(\"em\", Text(\"hello\", \" world\"))\n assert render_str(tag) == \"hello world\"\n\n\ndef test_tag_strong():\n tag = Tag(\"strong\", \"hello\")\n assert render_str(tag) == \"hello\"\n\n\ndef test_tag_i():\n tag = Tag(\"i\", \"hello\")\n assert render_str(tag) == \"hello\"\n\n\ndef test_tag_b():\n tag = Tag(\"b\", \"hello\")\n assert render_str(tag) == \"hello\"\n\n\ndef test_tag_tt():\n tag = Tag(\"tt\", \"hello\")\n assert render_str(tag) == \"hello\"\n\n\ndef test_tag_sup():\n tag = Tag(\"sup\", \"hello\")\n assert render_str(tag) == \"hello\"\n\n\ndef test_tag_sub():\n tag = Tag(\"sub\", \"hello\")\n assert render_str(tag) == \"hello\"\n\n\ndef test_tag_unknown():\n tag = Tag(\"***unknown***\", \"hello\")\n assert render_str(tag) == \"hello\"\n\n\ndef test_href():\n href = HRef(\"http://www.example.com\", \"hyperlinked text\")\n assert render_str(href) == (\n '' \"hyperlinked text\" \"\"\n )\n\n\ndef test_href_text():\n href = HRef(\"http://www.example.com\", Text(\"hyperlinked\", \" text\"))\n assert render_str(href) == (\n '' \"hyperlinked text\" \"\"\n )\n\n\ndef test_render_sequence():\n text = Text(\"hello \", Tag(\"em\", 
\"world\"))\n assert render_str(text) == \"hello world\"\n\n\n@pytest.fixture\ndef entry():\n data = pybtex.database.BibliographyData(\n {\n \"hongquin1997\": pybtex.database.Entry(\n \"article\",\n fields={\n \"language\": \"english\",\n \"title\": \"Predicting the Diffusion Coefficient\"\n \" in Supercritical Fluids\",\n \"journal\": \"Ind. Eng. Chem. Res.\",\n \"volume\": \"36\",\n \"year\": \"1997\",\n \"pages\": \"888-895\",\n },\n persons={\n \"author\": [\n pybtex.database.Person(\"Liu, Hongquin\"),\n pybtex.database.Person(\"Ruckenstein, Eli\"),\n ]\n },\n )\n }\n )\n style = pybtex.plugin.find_plugin(\"pybtex.style.formatting\", \"plain\")()\n entries = list(style.format_entries(data.entries.values()))\n return entries[0]\n\n\n@pytest.fixture\ndef document():\n return docutils.utils.new_document(\"test.rst\")\n\n\ndef test_citation(entry, document):\n node = Backend().citation(entry, document)\n assert str(node) == (\n ''\n \"\"\n \"\"\n \"Hongquin Liu and Eli Ruckenstein. \"\n \"Predicting the diffusion coefficient in supercritical fluids. \"\n \"Ind. Eng. Chem. Res., \"\n \"36:888–895, 1997.\"\n \"\"\n \"\"\n )\n\n\ndef test_citation_reference(entry, document):\n node = Backend().citation_reference(entry, document)\n id_ = \"id1\" if docutils.__version_info__ < (0, 18) else \"citation-reference-1\"\n assert str(node) == (\n f''\n f\"hongquin1997\"\n f\"\"\n )\n\n\ndef test_citation_use_label(entry, document):\n node = Backend().citation(entry, document, use_key_as_label=False)\n assert str(node) == (\n ''\n \"\"\n \"\"\n \"Hongquin Liu and Eli Ruckenstein. \"\n \"Predicting the diffusion coefficient in supercritical fluids. \"\n \"Ind. Eng. Chem. Res., \"\n \"36:888–895, 1997.\"\n \"\"\n \"\"\n )\n\n\ndef test_citation_reference_use_label(entry, document):\n node = Backend().citation_reference(entry, document, use_key_as_label=False)\n id_ = \"id1\" if docutils.__version_info__ < (0, 18) else \"citation-reference-1\"\n assert str(node) == (\n f''\n f\"1\"\n f\"\"\n )\n\n\ndef test_footnote(entry, document):\n node = Backend().footnote(entry, document)\n assert str(node) == (\n ''\n \"\"\n \"Hongquin Liu and Eli Ruckenstein. \"\n \"Predicting the diffusion coefficient in supercritical fluids. \"\n \"Ind. Eng. Chem. Res., \"\n \"36:888–895, 1997.\"\n \"\"\n \"\"\n )\n\n\ndef test_footnote_reference(entry, document):\n node = Backend().footnote_reference(entry, document)\n id_ = \"id1\" if docutils.__version_info__ < (0, 18) else \"footnote-reference-1\"\n assert str(node) == (\n f''\n )\n\n\ndef test_write_entry():\n with pytest.raises(NotImplementedError):\n Backend().write_entry(None, None, None)\n","sub_path":"test/test_backend.py","file_name":"test_backend.py","file_ext":"py","file_size_in_byte":5659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"320810274","text":"#!/usr/bin/env python3\n\n\"\"\"\nThe lazygrid program allows to build command lines array from a configuration file.\n\nCommand line arrays are text files containing one set of command line arguments at each line.\n\nThis is usefull for grid search: you can launch the same script many times with different arguments stored in the command line array.\n\n\nHow does this work?\n-------------------\n\nYou can find an example of configuration file just beside this script: `lazyfile_example.yml`. 
There is also an example output of such configuration file in `arrayparam_example.txt`.\nJust like Makefiles, the lazyfile is structured by rules that rely upon each other.\n\nThe rule `all` is the master rule of the file, this is the one that will be executed and its absence will result in an error. The rule `all` calls\nother rules. In contrary to other rules, the rule `all` will not concatenate the output of its subrule but simply launch them, one after the other.\n\nA rule can have keyword parameters or simple parameters:\n\n{\"--this-is-a-kw-parameter\": [\"kw parameter can take one value\", \"or one other value\"]}\n\n[\"--this-is-a-simple-boolean-parameter\", \"--this-is-an-other-simple-boolean-parameter\"]\n\n\nUsage:\n lazygrid -l lazyfile\n\nOptions:\n -l --lazyfile lazyfile The input configuration yml file.\n\"\"\"\n\nimport yaml\nfrom collections import OrderedDict\nfrom pprint import pprint\nimport copy\nimport numpy as np\nimport math\nimport os\nimport time\nimport random\n\n# todo specify external modules (ex numpy/maths) in the yaml file\nfrom docopt import docopt\nfrom pathlib import Path\n\ndef build_cmd(dict_arg, lazyfilename):\n cmd_lines = []\n try:\n todo_cmd_lines = dict_arg[\"all\"].keys()\n except KeyError:\n raise KeyError(\"There should be a section 'all'\")\n\n if list(dict_arg.keys())[0] != \"all\":\n raise ValueError(\"The first section of the configuration file should be 'all'\")\n todo_cmd_lines_cases = list(dict_arg.keys())[1:]\n cmd_line_cases = {}\n for case in todo_cmd_lines_cases:\n try:\n case_section = dict_arg[case]\n except KeyError:\n raise KeyError(\"Section {} referenced in all but does not exist\".format(case))\n cmd_line_case = [\"\"]\n for key, value in case_section.items():\n value = eval(str(value))\n tmp_cmd_line_case = []\n if type(value) == list:\n for cmd in cmd_line_case:\n for elm in value:\n tmp_cmd_line_case.append(\" \".join([cmd, elm]).strip())\n elif type(value) == OrderedDict:\n for cmd in cmd_line_case:\n for key_arg, value_arg in value.items():\n formated_value_arg = (f\"\" + str(value_arg)).format(LAZYFILE=lazyfilename)\n lst_value_arg = eval(str(formated_value_arg))\n for value_arg in lst_value_arg:\n tmp_cmd_line_case.append(\" \".join([cmd, str(key_arg) + \" \" + str(value_arg)]).strip())\n elif value is None:\n try:\n to_add_cmd_line = cmd_line_cases[key]\n except KeyError:\n raise KeyError(\"{} is referenced in {} but doesnt exist. 
Make sure it is defined BEFORE the section {}\".format(key, case, case))\n\n for cmd in cmd_line_case:\n for cmd_line_to_add in to_add_cmd_line:\n tmp_cmd_line_case.append(\" \".join([cmd, cmd_line_to_add]))\n\n else:\n raise Exception\n cmd_line_case = copy.deepcopy(tmp_cmd_line_case)\n cmd_line_cases[case] = cmd_line_case\n for todo_cmd_line in todo_cmd_lines:\n cmd_lines.extend(cmd_line_cases[todo_cmd_line])\n return cmd_lines\n\n\ndef ordered_load(stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict):\n class OrderedLoader(Loader):\n pass\n\n def construct_mapping(loader, node):\n loader.flatten_mapping(node)\n return object_pairs_hook(loader.construct_pairs(node))\n\n OrderedLoader.add_constructor(\n yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,\n construct_mapping)\n return yaml.load(stream, OrderedLoader)\n\n\ndef main():\n arguments = docopt(__doc__)\n abspath_lazyfile = os.path.abspath(arguments[\"--lazyfile\"])\n with open(abspath_lazyfile) as f:\n dataMap = ordered_load(f)\n final_cmd_lines = build_cmd(dataMap, lazyfilename=\"/\".join(abspath_lazyfile.split(\"/\")[-3:]).split(\".\")[0])\n for line in final_cmd_lines:\n print(line)\n\nif __name__ == \"__main__\":\n main()\n\n\n\n\n\n","sub_path":"lazygrid.py","file_name":"lazygrid.py","file_ext":"py","file_size_in_byte":4592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"147159331","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author : huxiansheng (you@example.org)\n\nimport os\nimport unittest\nfrom common_class.Root_set_up import root_xpath\n# 获取项目绝对路劲并且组合需要的新路径\nx = root_xpath()\ndir = x.get_root_path()\n\ncase_path = dir + '/test_case'\n\nsuite = unittest.TestLoader().discover(case_path)\n\nprint(suite)\nif __name__ == '__main__':\n # 执行用例\n runner = unittest.TextTestRunner()\n runner.run(suite)","sub_path":"test_case/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"548920152","text":"import hashlib\nimport os\n\nh = hashlib.sha256(b'hw.py')\nmy_dir = '/Users/xredian/Documents/EpamPython2019/11-programming-and-debugging/hw/'\nmy_hash = h.hexdigest()\n\n\ndef path(directory, hash256):\n paths = []\n files = os.listdir(directory)\n for file in files:\n file_hash = hashlib.sha256(bytes(file, encoding='utf8')).hexdigest()\n if file_hash == hash256:\n paths.append(os.path.abspath(file))\n return paths\n\n\nprint(path(my_dir, my_hash))\n\n\"\"\"\n1) sudo dtruss -c python3 task2.py 2> ./trace.txt\n>>>\nmost commonly used system call is stat64 = 208 calls\n\n2) python3 -m cProfile -s time task2.py 1000001 > ./profile.txt\n>>>\nthe \"hottest\" piece of code is {built-in method _imp.create_dynamic}\n ncalls tottime percall cumtime percall filename:lineno(function)\n 3 0.011 0.004 0.011 0.004 {built-in method _imp.create_dynamic}\n \n3) strace -c python3 task2.py 2> ./time.txt\nmost time consuming system call is mmap \n\n% time seconds usecs/call calls errors syscall\n------ ----------- ----------- --------- --------- ----------------\n 16,67 0,000538 9 55 mmap\n \n \nfull results are in files trace.txt, profile.txt and time.txt\n\"\"\"\n\n\n","sub_path":"11-programming-and-debugging/hw/task2/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"492876564","text":"#Name : Joseph Cormier\n#Class : CSCI 
141\n#Assignment : process student responses\n#I hope to be retired in 10 years, I have high hopes for myself and the 3 other students who answered the same, but we really just don't know how its gonna end up\n#It took me a solid week to actually get this done, I've probably had this file open in thonny for like an elapsed 15 hours and for just about all of that I haven't touched it\n#I hope that doesn't foreshadow how long it will take me to retire...\n\n#users enter a keyword and the program outputs the number of its instances\ndef keywordCount(fullList):\n fullString = \" \".join(fullList)\n keyword = input(\"What keyword would you like to check? (case sensitive) \")\n print(fullString.count(keyword))\n#outputs the average months students have been programming\ndef averageMonths(fullList):\n monthCount = 0\n totalCount = 0\n for x in range(0,len(fullList)):\n totalCount = totalCount + 1\n #make the current answer a string for testing\n oneString = \"\".join(fullList[x])\n #if the string converts to an int its a good answer and is added to the total\n try:\n num = int(oneString)\n #if the user answer isn't good it defaults to 0\n except:\n num = 0\n monthCount = monthCount+num\n print(\"{:.2f}\".format(monthCount/totalCount))\n#outputs average months of programming for students with a user chosen keyword in their response\ndef averageMonthsWithKeyword(fullList):\n monthCount = 0\n totalCount = 0\n keyword = input(\"What keyword would you like to check user month average? (case sensitive) \")\n for x in range(0,len(fullList[0])):\n #turn current 3rd answer into string, test for keyword\n oneString = \"\".join(fullList[2][x])\n #if keyword is there convert the corresponding 1st answer to an int, if it isn't good it defaults to 0\n if keyword in oneString:\n totalCount = totalCount +1\n oneNumString = \"\".join(fullList[0][x])\n try:\n num = int(oneNumString)\n except:\n num = 0\n monthCount = monthCount+num\n print(\"{:.1f}\".format(monthCount/totalCount))\n#prints out the percent of students who know about for loops\ndef percentLoop(fullList):\n totalCount = 0\n totalYes = 0\n for x in range(0,len(fullList)):\n totalCount = totalCount + 1\n #convert current answer to string, test for yes\n oneString = \"\".join(fullList[x])\n if \"yes\" in oneString.lower():\n totalYes = totalYes + 1\n print(\"{:.2f}\".format(totalYes/totalCount))\n#prints percent of students who are first gen college students\ndef firstGen(fullList):\n totalCount = 0\n totalYes = 0\n for x in range(0,len(fullList)):\n totalCount = totalCount + 1\n #convert current answer to string, test for yes\n oneString = \"\".join(fullList[x])\n if \"yes\" in oneString.lower():\n totalYes = totalYes + 1\n print(\"{0:.2%}\".format(totalYes/totalCount)) #yeah, one more decimal then in the example, im tired and I can probably eat a -1 on this assignment :^)\n#hey, its only like 26 lines of code, don't get mad at me for following the rules \\n\\n\\n\\n\\n\\n :^)\ndef main():\n #prepare list and get file\n fullList = [[],[],[],[]]\n fileInput = open(input(\"What is the name of the data file \"), \"r\")\n #now that we have our file read we can put it all into a 2d list, based on the separator |\n for line in fileInput:\n answers = line.split(\"|\")\n fullList[0].append(answers[0])\n fullList[1].append(answers[1])\n fullList[2].append(answers[2])\n fullList[3].append(answers[3].rstrip(\"\\n\")) #removing the \\n at the end of each line \n #loop data parsing until the user is finished, letting them parse in multiple ways \n while True:\n 
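#menu loop: keeps offering the analyses until the user picks option 6 to quit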
#tell user how they can parse the data\n        print(\"===========================\\nSelect an analysis option\\n\\n'1' - Perform a keyword count from the answers to question 3\\n'2' - Calculate the average months of coding experience\\n'3' - Calculate average months of coding experience among all students who have a specific keyword in their answer to question 3 of the survey\\n'4' - Calculate the percent of students familiar with for loops\\n'5' - Calculate the percent of students who are first generation college students\\n'6' - Quit\")\n        #get input\n        choice = input(\"\\nWhat analysis do you want to perform? \")\n        #apply logic based on choice, nice\n        if choice==\"1\":\n            #we only need the 3rd answer from each student, so we pull that specific list\n            keywordCount(fullList[2])\n        elif choice==\"2\":\n            #only want the 1st answers\n            averageMonths(fullList[0])\n        elif choice==\"3\":\n            #only need the 1st and 3rd answers, lazy method, could be optimized instead of grabbing the entire list\n            averageMonthsWithKeyword(fullList)\n        elif choice==\"4\":\n            #only want 2nd answers\n            percentLoop(fullList[1])\n        elif choice==\"5\":\n            #only want 4th answers\n            firstGen(fullList[3])\n        elif choice==\"6\":\n            break\n        else:\n            #they entered something wrong\n            print(\"Please enter a single digit, 1 through 6\")\nmain() #run this bad boy","sub_path":"processUserData.py","file_name":"processUserData.py","file_ext":"py","file_size_in_byte":5347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"324881843","text":"# Released under the MIT license. See the LICENSE file for more information.\n# https://github.com/ololobster/cvidone\n\n\nfrom .section import Section\n# Libs.\nimport datetime as dt\n\n\nclass Task(object):\n    @staticmethod\n    def createEmptyInstance():\n        task = Task()\n        task._id = None\n        task._section = None\n        task._in_completed_tasks_tree = False\n        task._parent_id = None\n        task._completed = False\n        task._name = \"\"\n        task._comment = \"\"\n        task._deadline = None\n        task._creation_time = None\n        task._completion_time = None\n        task._unfolded = True\n        return task\n\n\n    @staticmethod\n    def getExistingInstance(db, id):\n        raw_data = db.selectOne(\"\"\"\n            SELECT section_id, parent_id, in_completed_tasks_tree, name, comment, deadline, creation_time, completion_time, unfolded\n            FROM tasks\n            WHERE (id = %s)\n        \"\"\", id)\n\n        task = Task()\n        task._id = id\n        task._section = Section.getExisting(db, raw_data[\"section_id\"])\n        task._in_completed_tasks_tree = raw_data[\"in_completed_tasks_tree\"]\n        task._parent_id = raw_data[\"parent_id\"]\n        task._completed = raw_data[\"completion_time\"] is not None\n        task._name = raw_data[\"name\"]\n        task._comment = raw_data[\"comment\"]\n        task._deadline = raw_data[\"deadline\"]\n        task._creation_time = raw_data[\"creation_time\"]\n        task._completion_time = raw_data[\"completion_time\"]\n        task._unfolded = raw_data[\"unfolded\"]\n        return task\n\n\n    def save(self, db):\n        raw_data = {\n            \"section_id\": self._section.id\n            , \"in_completed_tasks_tree\": self._in_completed_tasks_tree\n            , \"parent_id\": self._parent_id\n            , \"name\": self._name\n            , \"comment\": self._comment\n            , \"unfolded\": self._unfolded\n        }\n        if (self._deadline is None):\n            raw_data[\"deadline\"] = None\n        else:\n            raw_data[\"deadline\"] = self._deadline.strftime(\"%Y-%m-%d\")\n\n        if (self._completed) and (self._completion_time is None):\n            self._completion_time = dt.datetime.utcnow()\n            raw_data[\"completion_time\"] = self._completion_time\n        elif (not self._completed) and 
(self._completion_time is not None):\n self._completion_time = None\n raw_data[\"completion_time\"] = None\n\n if (self.isNew()):\n self._id = db.insert(\"tasks\", raw_data)[\"id\"]\n else:\n # Dont worry about children, stored triggers will take care of them.\n db.updateSmart(\"tasks\", raw_data, [(\"id = %s\", self._id)])\n\n\n def remove(self, db):\n assert(not self.isNew())\n db.remove(\"tasks\", [(\"id = %s\", self._id)])\n self._id = None\n\n\n def updatePlace(self, db, place):\n assert(not self.isNew())\n db.update(query=\"SELECT setNodePlace('tasks', %i, %i)\" % (self._id, place))\n\n\n @property\n def id(self):\n return self._id\n def isNew(self):\n return (self._id is None)\n\n @property\n def section(self):\n return self._section\n @section.setter\n def section(self, section):\n if (self._section is not None) and (self._section.id == section.id):\n return\n self._section = section\n self.moveToRoot()\n\n\n def getParent(self, db):\n return None if (self.isInRoot()) else Task.getExistingInstance(db, self._parent_id)\n def setParent(self, parent):\n if (self._parent_id == parent.id):\n return\n self._section = parent.section\n self._parent_id = parent.id\n self._in_completed_tasks_tree = parent._in_completed_tasks_tree\n if (parent.getCompletedState()):\n self._completed = True\n\n\n def isInRoot(self):\n s = self._section\n return (self._parent_id in (s.root_active_task_id, s.root_completed_task_id))\n def moveToRoot(self):\n if (self.isInRoot()):\n return\n if (self._completed):\n self._in_completed_tasks_tree = True\n self._parent_id = self._section.root_completed_task_id\n else:\n self._in_completed_tasks_tree = False\n self._parent_id = self._section.root_active_task_id\n\n\n def getCompletedState(self):\n return self._completed\n def setCompletedState(self, db, completed):\n if (self._completed == completed):\n return\n\n if (self.isInRoot()):\n if (completed):\n self._in_completed_tasks_tree = True\n self._parent_id = self._section.root_completed_task_id\n else:\n self._in_completed_tasks_tree = False\n self._parent_id = self._section.root_active_task_id\n elif (False == completed): # If not in root and not completed.\n assert(False == self.getParent(db).getCompletedState())\n self._completed = completed\n\n\n @property\n def name(self):\n return self._name\n @name.setter\n def name(self, name):\n self._name = name\n\n @property\n def comment(self):\n return self._comment\n @comment.setter\n def comment(self, new_comment):\n self._comment = new_comment\n\n @property\n def deadline(self):\n return \"\" if (self._deadline is None) else self._deadline.strftime(\"%d.%m.%Y\")\n @deadline.setter\n def deadline(self, deadline):\n self._deadline = deadline\n\n @property\n def unfolded(self):\n return self._unfolded\n @unfolded.setter\n def unfolded(self, unfolded_state):\n self._unfolded = unfolded_state\n\n @property\n def creation_time(self):\n return self._creation_time\n @property\n def completion_time(self):\n return self._completion_time\n\n\n def getLabelsIds(self, db, user):\n assert(not self.isNew())\n return db.select(\"\"\"\n SELECT label_id\n FROM tasks_labels, labels\n WHERE (task_id = %s) AND (label_id = labels.id) AND (labels.user_id = %s)\n \"\"\", self._id, user.id)\n\n\n def setLabels(self, db, labels_ids):\n assert(not self.isNew())\n db.remove(\"tasks_labels\", [(\"task_id = %s\", self._id)])\n for label_id in labels_ids:\n db.insert(\"tasks_labels\", {\"task_id\": self._id, \"label_id\": label_id})\n\n\n # Args `date_from` and `date_to` are instances of 
datetime.date.\n    @staticmethod\n    def search(\n        db\n        , user\n        , search_zone\n        , date_from=None\n        , date_to=None\n        , label_id=None\n    ):\n        tables = [\"tasks\", \"sections_instances\"]\n        restrictions = [\n            (\"sections_instances.user_id = %s\", user.id)\n            , \"sections_instances.section_id = tasks.section_id\"\n            , \"tasks.name IS NOT NULL\"\n        ]\n\n        if (\"active\" == search_zone):\n            restrictions.append(\"completion_time IS NULL\")\n        elif (\"completed\" == search_zone):\n            restrictions.append(\"completion_time IS NOT NULL\")\n\n        if (date_from is not None) and (date_to is not None):\n            restrictions.append((\n                \"(deadline BETWEEN %s AND %s) OR (completion_time BETWEEN (%s AT TIME ZONE %s) AND (%s AT TIME ZONE %s))\"\n                , date_from\n                , date_to\n                , dt.datetime.combine(date_from, dt.time())\n                , user.time_zone\n                , dt.datetime.combine(date_to, dt.time(23, 59, 59))\n                , user.time_zone\n            ))\n        elif (date_from is not None):\n            restrictions.append((\n                \"(deadline >= %s) OR (completion_time >= %s AT TIME ZONE %s)\"\n                , date_from\n                , dt.datetime.combine(date_from, dt.time())\n                , user.time_zone\n            ))\n        elif (date_to is not None):\n            restrictions.append((\n                \"(deadline <= %s) OR (completion_time <= %s AT TIME ZONE %s)\"\n                , date_to\n                , dt.datetime.combine(date_to, dt.time(23, 59, 59))\n                , user.time_zone\n            ))\n\n        if (label_id is not None):\n            tables.append(\"tasks_labels\")\n            restrictions.append(\"tasks.id = tasks_labels.task_id\")\n            restrictions.append((\"tasks_labels.label_id = %s\", label_id))\n\n        tasks = db.selectSmart(\n            fields=(\"tasks.id\", \"tasks.name\", \"completion_time\", \"deadline\", \"sections_instances.id AS si_id\")\n            , tables=tables\n            , restrictions=restrictions\n            , order_by=\"deadline\"\n            , limit=50\n        )\n        return tasks\n","sub_path":"cvidone/model/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":8545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"615184232","text":"def order_pick(files, top):\n    \"\"\"\n    :type files: list\n    :type top: int\n    \"\"\"\n    n = 0\n    for k, v in files:\n        if n < top:\n            print(k, v)\n            n += 1\n        else:\n            break\n\n\ndef counting(urls):\n    \"\"\"\n    :type urls: list\n    :rtype: list\n    \"\"\"\n    dc = dict()\n    for row in urls:\n        file = row.split(\"/\")[-1]\n        dc[file] = dc.get(file, 0) + 1\n    return [(k, dc[k]) for k in sorted(dc.keys())]\n\n\ndef main():\n    urls = [\n        \"http://www.google.com/a.txt\",\n        \"http://www.google.com.tw/a.txt\",\n        \"http://www.google.com/download/c.jpg\",\n        \"http://www.google.co.jp/a.txt\",\n        \"http://www.google.com/b.txt\",\n        \"https://facebook.com/movie/b.txt\",\n        \"http://yahoo.com/123/000/c.jpg\",\n        \"http://gliacloud.com/haha.png\"\n    ]\n    n = 3\n    ordered_file = counting(urls)\n    order_pick(ordered_file, n)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"ex1_1.py","file_name":"ex1_1.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"2509953","text":"import logging\nimport os\nimport sys\nfrom tempfile import TemporaryDirectory\n\n\nfrom autumn.tools import db, plots\nfrom autumn.settings import REMOTE_BASE_DIR\nfrom autumn.tools.utils.parallel import report_errors, run_parallel_tasks, gather_exc_plus\nfrom autumn.tools.utils.fs import recreate_dir\nfrom autumn.tools.utils.s3 import upload_to_run_s3, get_s3_client\nfrom autumn.tools.utils.timer import Timer\n\nfrom .utils import get_project_from_run_id, set_logging_config\n\nlogger = 
logging.getLogger(__name__)\n\n\nos.makedirs(REMOTE_BASE_DIR, exist_ok=True)\n\nCALIBRATE_DATA_DIR = os.path.join(REMOTE_BASE_DIR, \"data\", \"calibration_outputs\")\nCALIBRATE_PLOTS_DIR = os.path.join(REMOTE_BASE_DIR, \"plots\")\nCALIBRATE_LOG_DIR = os.path.join(REMOTE_BASE_DIR, \"logs\")\nCALIBRATE_DIRS = [CALIBRATE_DATA_DIR, CALIBRATE_PLOTS_DIR, CALIBRATE_LOG_DIR]\nMLE_PARAMS_PATH = os.path.join(CALIBRATE_DATA_DIR, \"mle-params.yml\")\n\n\ndef calibrate_task(run_id: str, runtime: float, num_chains: int, verbose: bool):\n s3_client = get_s3_client()\n\n # Set up directories for plots and output data.\n with Timer(f\"Creating calibration directories\"):\n for dirpath in CALIBRATE_DIRS:\n recreate_dir(dirpath)\n\n # Run the actual calibrations\n with Timer(f\"Running {num_chains} calibration chains\"):\n args_list = [\n (run_id, runtime, chain_id, num_chains, verbose) for chain_id in range(num_chains)\n ]\n try:\n chain_ids = run_parallel_tasks(run_calibration_chain, args_list, False)\n cal_success = True\n except Exception as e:\n # Calibration failed, but we still want to store some results\n cal_success = False\n \n with Timer(\"Uploading logs\"):\n upload_to_run_s3(s3_client, run_id, CALIBRATE_LOG_DIR, quiet=not verbose)\n\n with Timer(\"Uploading run data\"):\n upload_to_run_s3(s3_client, run_id, CALIBRATE_DATA_DIR, quiet=not verbose)\n \n if not cal_success:\n logger.info(\"Terminating early from failure\")\n sys.exit(-1)\n\n # Upload the calibration outputs of AWS S3.\n #with Timer(f\"Uploading calibration data to AWS S3\"):\n # for chain_id in chain_ids:\n # with Timer(f\"Uploading data for chain {chain_id} to AWS S3\"):\n # src_dir = os.path.join(CALIBRATE_DATA_DIR, f\"chain-{chain_id}\")\n # upload_to_run_s3(s3_client, run_id, src_dir, quiet=not verbose)\n\n # Create plots from the calibration outputs.\n with Timer(f\"Creating post-calibration plots\"):\n project = get_project_from_run_id(run_id)\n plots.calibration.plot_post_calibration(\n project.plots, CALIBRATE_DATA_DIR, CALIBRATE_PLOTS_DIR, priors=[]\n )\n\n # Upload the plots to AWS S3.\n with Timer(f\"Uploading plots to AWS S3\"):\n upload_to_run_s3(s3_client, run_id, CALIBRATE_PLOTS_DIR, quiet=not verbose)\n\n # Find the MLE parameter set from all the chains.\n with Timer(f\"Finding max likelihood estimate params\"):\n database_paths = db.load.find_db_paths(CALIBRATE_DATA_DIR)\n with TemporaryDirectory() as tmp_dir_path:\n collated_db_path = os.path.join(tmp_dir_path, \"collated.db\")\n db.process.collate_databases(\n database_paths, collated_db_path, tables=[\"mcmc_run\", \"mcmc_params\"]\n )\n db.store.save_mle_params(collated_db_path, MLE_PARAMS_PATH)\n\n # Upload the MLE parameter set to AWS S3.\n with Timer(f\"Uploading max likelihood estimate params to AWS S3\"):\n upload_to_run_s3(s3_client, run_id, MLE_PARAMS_PATH, quiet=not verbose)\n\n with Timer(f\"Uploading final logs to AWS S3\"):\n upload_to_run_s3(s3_client, run_id, 'log', quiet=not verbose)\n\n@report_errors\ndef run_calibration_chain(\n run_id: str, runtime: float, chain_id: int, num_chains: int, verbose: bool\n):\n \"\"\"\n Run a single calibration chain.\n \"\"\"\n set_logging_config(verbose, chain_id, CALIBRATE_LOG_DIR, task='calibration')\n logging.info(\"Running calibration chain %s\", chain_id)\n os.environ[\"AUTUMN_CALIBRATE_DIR\"] = CALIBRATE_DATA_DIR\n\n try:\n project = get_project_from_run_id(run_id)\n project.calibrate(runtime, chain_id, num_chains)\n except Exception:\n logger.exception(\"Calibration chain %s failed\", chain_id)\n 
gather_exc_plus(os.path.join(CALIBRATE_LOG_DIR, f\"crash-calibration-{chain_id}.log\"))\n        raise\n    logging.info(\"Finished running calibration chain %s\", chain_id)\n    return chain_id\n","sub_path":"autumn/tasks/calibrate.py","file_name":"calibrate.py","file_ext":"py","file_size_in_byte":4483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"395128582","text":"#coding=utf-8\n\nimport requests\nimport configparser\nimport os\nimport sys\nimport glob\nfrom PIL import Image\n\n\ndef checkBeforeRunning():\n    '''\n    This function checks whether config.ini has been modified correctly.\n    If not, return False.\n    '''\n    modified=False\n    try: #In case that config.ini doesn't exist.\n        config=configparser.ConfigParser()\n        config.read(\"config.ini\")\n        modified=config.getboolean(\"flag\",\"modified\")\n    except:\n        print(\"config.ini has been invalidly modified or deleted!\")\n    return modified\n\n\ndef downloader():\n    '''\n    Read config from config.ini and download vcodes.\n    Return the number of vcodes downloaded successfully.\n    '''\n    print(\"Start to download vcodes now.\")\n    config=configparser.ConfigParser()\n    config.read(\"config.ini\")\n    url=config.get(\"download\", \"url\")\n    folder=config.get(\"download\", \"folder\")\n    num=config.getint(\"download\", \"num\")\n    maxAttemptTimes=config.getint(\"download\", \"maxAttemptTimes\")\n    successNum=0\n    if os.path.exists(folder)== False:\n        os.makedirs(folder)\n    for i in range(0,abs(num)):\n        attemptTimes=0\n        while attemptTimes<maxAttemptTimes:\n            attemptTimes=attemptTimes+1\n            try:\n                response=requests.get(url)\n                with open(folder + \"/{0:04d}.jpg\".format(successNum), \"wb\") as f:\n                    f.write(response.content)\n                successNum=successNum+1\n                break\n            except:\n                continue\n        print(\"\\rDownloaded {0} of {1} vcodes successfully!\".format(successNum,abs(num)),end=\"\")\n    print(\"\")\n    return successNum\n\n\ndef binImagePrinter(binImage):\n    '''\n    Print the binarized image in the console for debugging.\n    '''\n    for y in range(binImage.height):\n        line=\"\"\n        for x in range(binImage.width):\n            line=line+str(binImage.getpixel((x,y)))\n        print(line)\n\n\ndef ridNoise(binImage):\n    '''\n    Remove isolated noise points from binImage in place.\n    A pixel counts as noise if all of its 8 neighbours (treating pixels\n    outside the image as background) are white.\n    '''\n    def isNoise(x,y):\n        whiteCount=0\n        for dx in (-1,0,1):\n            for dy in (-1,0,1):\n                if dx==0 and dy==0:\n                    continue\n                nx=x+dx\n                ny=y+dy\n                if nx<0 or ny<0 or nx>=binImage.width or ny>=binImage.height:\n                    whiteCount=whiteCount+1\n                elif binImage.getpixel((nx,ny))==1:\n                    whiteCount=whiteCount+1\n        if whiteCount >= 8:\n            return True\n        else:\n            return False\n\n    for y in range(binImage.height):\n        for x in range(binImage.width):\n            if(isNoise(x,y)==True):\n                binImage.putpixel((x,y),1)\n\n\ndef cutAndSaveImage(binImage,num,imageOutputFolder):\n    '''\n    Cut binImage into single characters and save in imageOutputFolder as num-x.jpg.\n    Return the paths of single characters.\n    '''\n    binImage=binImage.point(lambda x:x*255)\n    imagePaths=[]\n    config=configparser.ConfigParser()\n    config.read(\"config.ini\")\n    characterNum=config.getint(\"preprocess\", \"characternum\")\n    firstCharacter=[int(x) for x in config.get(\"preprocess\", \"firstcharacter\").split(\",\")]\n    step=config.getint(\"preprocess\", \"step\")\n    for i in range(0,characterNum):\n        x1=step*i+firstCharacter[0]\n        y1=firstCharacter[1]\n        x2=step*i+firstCharacter[2]\n        y2=firstCharacter[3]\n        subImage=binImage.crop((x1,y1,x2,y2))\n        path=imageOutputFolder+ \"/{0:04d}-{1}.jpg\".format(num,i)\n        subImage.save(path)\n        imagePaths.append(path)\n    return imagePaths\n\n\ndef preprocessorAll(successNum):\n    '''\n    After downloading, preprocess every vcode.\n    '''\n    print(\"Start to preprocess vcodes now.\")\n    config=configparser.ConfigParser()\n    config.read(\"config.ini\")\n    folderOfRawVcode=config.get(\"download\", \"folder\")\n    folderOfCuttedVcode=config.get(\"preprocess\",\"folder\")\n    if os.path.exists(folderOfCuttedVcode)== False:\n        os.makedirs(folderOfCuttedVcode)\n    for i in range(0,successNum):\n        imagePath=folderOfRawVcode + \"/{0:04d}\".format(i) + \".jpg\"\n        imageRaw=Image.open(imagePath)\n        #imageRaw.save(\"CuttedVcode/raw\" + str(i) + \".jpg\")\n        imageGrey=imageRaw.convert(\"L\")\n        #imageGrey.save(\"CuttedVcode/grey\" + str(i) + \".jpg\")\n        imageBin=imageGrey.point(lambda x : x >140)\n        #binImagePrinter(imageBin) #Print the binarized image for debugging.\n        ridNoise(imageBin)\n        #binImagePrinter(imageBin)\n        cutAndSaveImage(imageBin,i,imageOutputFolder=folderOfCuttedVcode)\n        print(\"\\rPreprocessed {0} of {1} vcodes successfully!\".format(i+1,successNum),end=\"\")\n\n\nif __name__ == \"__main__\":\n    if checkBeforeRunning()==False:\n        print(\"Please modify config.ini correctly before running this program.\")\n        exit()\n    successNum=downloader()\n    preprocessorAll(successNum)\n    print(\"\\nVcodes have been collected and preprocessed successfully.\")\n    print(\"Please run classify.py to classify images artificially.\")\n\n\n\n\n\n","sub_path":"CollectAndPreprocess.py","file_name":"CollectAndPreprocess.py","file_ext":"py","file_size_in_byte":5471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
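The preprocessing in CollectAndPreprocess.py above goes greyscale -> threshold at 140 -> 0/1 image before de-noising and cutting. A minimal self-contained sketch of that binarisation step with Pillow (the 4x4 image and its pixel values are made up for illustration):

from PIL import Image

# Bright background with one dark "ink" pixel, as a stand-in for a vcode.
img = Image.new("L", (4, 4), color=200)
img.putpixel((1, 1), 50)

# Same thresholding as preprocessorAll: pixels brighter than 140 become 1.
binary = img.point(lambda v: v > 140)
print(list(binary.getdata()))  # fifteen 1s and a single 0 for the dark pixel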
 +{"seq_id":"543472477","text":"import random \r\nimport datetime\r\nuser = User('Fake_User')\r\ndb.session.add(user)\r\ndb.session.commit()\r\nuser = User.query.first()\r\ntag_one = Tag('Python') \r\ntag_two = Tag('Flask') \r\ntag_three = Tag('SQLAlchemy') \r\ntag_four = Tag('Jinja') \r\ntag_list = [tag_one, tag_two, tag_three, tag_four]\r\ns = \"Example text\"\r\nfor i in range(100):\r\n    new_post = Post(\"Post \" + str(i)) \r\n    new_post.user = user \r\n    new_post.publish_date = datetime.datetime.now() \r\n    new_post.text = s \r\n    new_post.tags = random.sample(tag_list, random.randint(1, 3)) \r\n    db.session.add(new_post)\r\ndb.session.commit()","sub_path":"test_data.py","file_name":"test_data.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"616916408","text":" # calc.py - a Python calculator\nfrom tkinter import *\n\nclass Calculator(Frame):\n    def __init__(self):\n        self.current = '0'\n        self.store = ''\n        self.new_num = True\n        self.combo = False\n        self.op = ''\n\n    def num_press(self, num):\n\n        #if operation in stack, clear output field for new number\n        if self.combo:\n            origin = text_box.get()\n        elif self.op:\n            self.combo = True\n            text_box.delete(0,END)\n        #if no op in stack keep the old number to add up with new one\n        else:\n            origin = text_box.get()\n\n        temp2 = str(num)\n        if self.new_num:\n            self.current = temp2\n            self.new_num = False\n        else:\n            if temp2 == '.':\n                if temp2 in origin:\n                    return\n            self.current = origin + temp2\n        self.display(self.current)\n\n    def oper(self,op):\n        if self.combo:\n            self.execute()\n            self.op = op\n            op_box.delete(0,END)\n            op_box.insert(0,op)\n            self.combo = False\n        self.store = text_box.get()\n        self.op = op\n        op_box.delete(0,END)\n        op_box.insert(0,op)\n        self.new_num = True\n\n    def execute(self):\n        answer = self.store + self.op + str(self.current)\n        try:\n            if self.op == '+':\n                self.current = float(self.store) + float(self.current)\n            elif self.op == '-':\n                self.current = float(self.store) - float(self.current)\n            elif self.op == '*':\n                self.current = float(self.store) * float(self.current)\n            elif self.op == '/':\n                if float(self.current)==0:\n                    if float(self.store)==0:\n                        self.current = 'Infinity'\n                        self.op = ''\n                        self.display(self.current)\n                    else:\n                        self.current = 'NaN'\n                        self.op = ''\n                        self.display(self.current)\n                else:\n                    self.current = float(self.store) / float(self.current)\n            listbox.insert(END,answer + '=' + str(self.current))\n            self.op = ''\n            self.display(self.current)\n        except:\n            self.current = 'NaN'\n            self.op = ''\n            self.display(self.current)\n        op_box.delete(0,END)\n\n\n    def final(self):\n        if self.op:\n            self.execute()\n            self.new_num = True\n            self.combo = False\n        else:\n            return\n\n    def display(self, number):\n        text_box.delete(0, END)\n        text_box.insert(0, number)\n\n    def clear(self):\n        self.display(0)\n        self.current = '0'\n        self.new_num = True\n\n    def all_clear(self):\n        self.display(0)\n        self.op = ''\n        self.store = ''\n        self.current = ''\n        self.new_num = True\n        self.combo = False\n\nclass ResultBox():\n    def clicked(self,event):\n        for selection in listbox.curselection():\n            sum1.all_clear()\n            bla, current = listbox.get(selection).split('=')\n            if current == 'NaN':\n                return\n            elif current == 'Infinity':\n                return\n            else:\n                sum1.current=current\n                sum1.display(current)\n    def clear(self):\n        listbox.delete(0, END)\n        sum1.all_clear()\n\n#components\nsum1 = Calculator()\nrb = ResultBox()\nroot = Tk()\ncalc = Frame(root)\ncalc.grid()\nlb = Frame(root)\nlb.grid()\n\n#Frame setting\nroot.title(\"Calculator\")\ntext_box = Entry(calc, justify=RIGHT)\ntext_box.grid(row = 0, column = 0, columnspan = 3, pady = 5)\ntext_box.insert(0, \"0\")\n\nop_box = Entry(calc,width=5)\nop_box.grid(row = 0, column = 3)\n\n#listbox\nlistbox = Listbox(lb)\nlistbox.bind('<<ListboxSelect>>', rb.clicked)\nb = Button(calc , text='RBC')\nb['command'] = rb.clear\nb.grid(row = 5, column = 0, pady = 5)\nlistbox.pack()\n\n#GUI numbers\nnumbers = \"789456123\"\ni = 0\nbttn = []\nfor j in range(1,4):\n    for k in range(3):\n        bttn.append(Button(calc, text = numbers[i]))\n        bttn[i].grid(row = j, column = k, pady = 5)\n        bttn[i][\"command\"] = lambda x = numbers[i]: sum1.num_press(x)\n        i += 1\n\n#GUI others\nbttn_0 = Button(calc, text = \"0\")\nbttn_0[\"command\"] = lambda: sum1.num_press(0)\nbttn_0.grid(row = 4, column = 1, pady = 5)\n\nbttn_div = Button(calc, text = chr(247))\nbttn_div[\"command\"] = lambda: sum1.oper(\"/\")\nbttn_div.grid(row = 1, column = 3, pady = 5)\n\nbttn_mult = Button(calc, text = \"x\")\nbttn_mult[\"command\"] = lambda: sum1.oper(\"*\")\nbttn_mult.grid(row = 2, column = 3, pady = 5)\n\nminus = Button(calc, text = \"-\")\nminus[\"command\"] = lambda: sum1.oper(\"-\")\nminus.grid(row = 3, column = 3, pady = 5)\n\npoint = Button(calc, text = \".\")\npoint[\"command\"] = lambda: sum1.num_press(\".\")\npoint.grid(row = 4, column = 0, pady = 5)\n\nadd = Button(calc, text = \"+\")\nadd[\"command\"] = lambda: sum1.oper('+')\nadd.grid(row = 4, column = 3, pady = 5)\n\nclear = Button(calc, text = \"C\")\nclear[\"command\"] = sum1.clear\nclear.grid(row = 5, column = 1, pady = 5)\n\nall_clear = Button(calc, text = \"AC\")\nall_clear[\"command\"] = sum1.all_clear\nall_clear.grid(row = 5, column = 2, pady = 5)\n\nequals = Button(calc, text = \"=\")\nequals[\"command\"] = sum1.final\nequals.grid(row = 5, column = 3, pady = 5)\n\n#run\nroot.mainloop()\n","sub_path":"calculate.py","file_name":"calculate.py","file_ext":"py","file_size_in_byte":5374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
 +{"seq_id":"583073356","text":"#This class handles the representation of a polygon through an array of points\nimport punto\n\nclass Poligono():\n    #Constructor of the Poligono class\n    def __init__(self,puntos):\n        self.lados = len(puntos)\n        self.puntos = puntos\n\n    def to_string(self):\n        string = \"[\"\n        for i in range(len(self.puntos)):\n            if(i!=len(self.puntos)-1):\n                string = string + self.puntos[i].to_string() + \", \"\n            else:\n                string = string + self.puntos[i].to_string()\n        string = string + \"]\"\n        return string\n\n#Example of creating a polygon\nclass main():\n    print(\"POLIGONO CLASS\")\n    A = punto.Punto(x = 0, y = 0)\n    B = punto.Punto(x = 0, y = 1)\n    C = punto.Punto(x = 1, y = 0)\n    D = punto.Punto(x = 1, y = 1)\n    cuadrado = Poligono(puntos = [A,B,C,D])\n    print(\"The points of the polygon \\\"cuadrado\\\" are:\", end = \" \")\n    print(cuadrado.to_string())\n\n    cuadrado.puntos.pop(0)\n    print(\"We remove the first point:\", end = \" \")\n    print(cuadrado.to_string())\n\n    cuadrado.puntos.pop(0)\n    print(\"We try to remove the first point again:\", end = \" \")\n    print(cuadrado.to_string())\n    print()","sub_path":"poligono.py","file_name":"poligono.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"653911907","text":"import time\nfrom psycopg2 import OperationalError as Psycopg2OpError\nfrom django.db.utils import OperationalError\nfrom django.core.management.base import BaseCommand\n\n\nclass Command(BaseCommand):\n    def handle(self, *args, **options):\n        self.stdout.write('Waiting for database')\n        db_up = False\n        while not db_up:\n            try:\n                self.check(databases=['default'])\n                db_up = True\n            except(Psycopg2OpError, OperationalError):\n                self.stdout.write('Database unavailable, waiting 1 second...')\n                time.sleep(1)\n\n        self.stdout.write(self.style.SUCCESS('Database available!'))\n","sub_path":"app/book/management/commands/wait_for_db.py","file_name":"wait_for_db.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"654216853","text":"# Goal - Make a position Weight Matrix and plot it using seaborn\n# Matrix - rows: amino acids, columns: position on the aa seq\nimport numpy\nimport seaborn\nimport matplotlib.pyplot as plt\n\n# Import Data\nb = 'VR3_Low_clustal.fasta'\nnb_file = open(b) # default - r - open for reading\nnb = nb_file.read()\nnb_list = nb.splitlines()\nnb_file.close()\nlen_xter = len(nb_list[1])\nprint(nb_list)\n\n#Cut out the aaseq\nlinea = []\nposa = []\naaseq = []\naaseq_line = numpy.arange(1, len(nb_list), 2) # Note this might change\nfor position, line in enumerate(nb_list):\n    print(position, line)\n    linea.append(line)\n    # all the lines of the fasta file\n    posa.append(position)\n    # the position of the corresponding fasta lines\n    if position in aaseq_line:\n        aaseq.append(line)\n        # To extract the aa lines\na = aaseq\nprint('aaseq:',a)\n\n# Make the matrix\naa = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'W', 'V', 'Y', '-']\ns = (len(aa), len(a[1]))\nmat = numpy.zeros(s) # matrix with one row per symbol (20 amino acids plus the gap) and one column per position\n# rows: A, C, D, E, F, G, H, I, K, L, M, N, P, Q, R, S, T, W, V, Y, -\npos = numpy.arange(1, len(a[1])+1, 1)\n\nfor position, line in enumerate(a):\n    for j in range(len(a[1])):\n        for i in range(len(aa)):\n            if line[j] == aa[i]:\n                num = mat[i, j] + 1\n                mat[i, j] = num\n\n\nprint(mat)\n\nN = len(a) # Number of sequences\nI = len(a[1]) # Length of sequences\n\nppm = mat / N\nprint(ppm)\n\nb = 0.05 #for aa it is 1/20 = 0.05\npwm = numpy.log2((ppm/b))\nprint('pwm:', pwm)\n\n\n# Plot as a heat map\n# weimat = seaborn.load_dataset(pwm)\n# pwm_df = weimat.pivot('DNA Nucleotide', 'Position', 'Position Probability')\nseaborn.heatmap(pwm, yticklabels=aa, xticklabels=pos, vmin=0, vmax=5)\nplt.title('Heat Map for Amino Acids in CDR3 for the Low Nanobodies (10 seq)')\nplt.show()\n\n#Incorporate a pseudo count value","sub_path":"PosWeightMat_AA.py","file_name":"PosWeightMat_AA.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
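PosWeightMat_AA.py above counts residues per alignment column, divides by the number of sequences to get a position probability matrix (PPM), and takes log2 against a flat background b = 0.05 to get the position weight matrix (PWM). A compressed numpy illustration of the same arithmetic on two made-up 4-residue sequences:

import numpy

seqs = ["ACDA", "ACDE"]                    # toy alignment, both length 4
alphabet = sorted(set("".join(seqs)))      # ['A', 'C', 'D', 'E']
counts = numpy.zeros((len(alphabet), len(seqs[0])))
for s in seqs:
    for col, residue in enumerate(s):
        counts[alphabet.index(residue), col] += 1

ppm = counts / len(seqs)                   # position probability matrix
with numpy.errstate(divide="ignore"):      # log2(0) -> -inf for absent residues
    pwm = numpy.log2(ppm / 0.05)           # same background b = 0.05 as above
print(pwm)

This also makes the script's closing "#Incorporate a pseudo count value" note concrete: adding a small constant to the counts before normalising removes the -inf entries.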
 +{"seq_id":"8030440","text":"##\n# File: 
testLeadingHumanProtein.py\n# Author: J. Westbrook\n# Date: 13-Oct-2020\n#\n# Updates:\n#\n##\n\"\"\"\nHuman protein with only 9606 taxonomy.\n\"\"\"\n\n__docformat__ = \"restructuredtext en\"\n__author__ = \"John Westbrook\"\n__email__ = \"jwest@rcsb.rutgers.edu\"\n__license__ = \"Apache 2.0\"\n\nimport copy\nimport logging\nimport os\nimport time\nimport unittest\n\nfrom rcsb.exdb.utils.ObjectExtractor import ObjectExtractor\nfrom rcsb.utils.anal.EntityClusterDataPrep import EntityClusterDataPrep\nfrom rcsb.utils.config.ConfigUtil import ConfigUtil\nfrom rcsb.utils.io.MarshalUtil import MarshalUtil\nfrom rcsb.utils.io.TimeUtil import TimeUtil\n\nlogging.basicConfig(level=logging.INFO, format=\"%(asctime)s [%(levelname)s]-%(module)s.%(funcName)s: %(message)s\")\nlogger = logging.getLogger()\n\nHERE = os.path.abspath(os.path.dirname(__file__))\nTOPDIR = os.path.dirname(os.path.dirname(os.path.dirname(HERE)))\n\n\nclass LeadingHumanProteinTests(unittest.TestCase):\n def __init__(self, methodName=\"runTest\"):\n super(LeadingHumanProteinTests, self).__init__(methodName)\n self.__verbose = True\n\n def setUp(self):\n #\n self.__mockTopPath = os.path.join(TOPDIR, \"rcsb\", \"mock-data\")\n configPath = os.path.join(TOPDIR, \"rcsb\", \"mock-data\", \"config\", \"dbload-setup-example.yml\")\n self.__clusterTopPath = os.path.join(TOPDIR, \"rcsb\", \"mock-data\", \"cluster_data\", \"mmseqs_clusters_current\")\n #\n configName = \"site_info_remote_configuration\"\n self.__cfgOb = ConfigUtil(configPath=configPath, defaultSectionName=configName, mockTopPath=self.__mockTopPath)\n #\n self.__resourceName = \"MONGO_DB\"\n self.__workPath = os.path.join(TOPDIR, \"CACHE\", \"exdb\")\n #\n self.__testEntryCacheKwargs = {\"fmt\": \"json\", \"indent\": 3}\n self.__objectLimitTest = None\n #\n self.__startTime = time.time()\n logger.debug(\"Starting %s at %s\", self.id(), time.strftime(\"%Y %m %d %H:%M:%S\", time.localtime()))\n\n def tearDown(self):\n endTime = time.time()\n logger.info(\"Completed %s at %s (%.4f seconds)\\n\", self.id(), time.strftime(\"%Y %m %d %H:%M:%S\", time.localtime()), endTime - self.__startTime)\n\n def testExtractUniProtDetails(self):\n \"\"\"Test case - extract selected UniProt details\"\"\"\n try:\n obEx = ObjectExtractor(\n self.__cfgOb,\n databaseName=\"uniprot_exdb\",\n collectionName=\"reference_entry\",\n cacheFilePath=os.path.join(self.__workPath, \"uniprot-data-details-example.json\"),\n useCache=False,\n keyAttribute=\"uniprot\",\n uniqueAttributes=[\"rcsb_id\"],\n cacheKwargs=self.__testEntryCacheKwargs,\n objectLimit=self.__objectLimitTest,\n selectionList=[\"rcsb_id\", \"taxonomy_id\", \"host_taxonomy_id\", \"names\", \"gene\", \"source_scientific\", \"sequence\"],\n )\n eCount = obEx.getCount()\n logger.info(\"Reference sequence count is %d\", eCount)\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()\n\n def testExtractEntryDetails(self):\n \"\"\"Test case - extract selected entry details\"\"\"\n try:\n obEx = ObjectExtractor(\n self.__cfgOb,\n databaseName=\"pdbx_core\",\n collectionName=\"pdbx_core_entry\",\n cacheFilePath=os.path.join(self.__workPath, \"entry-data-details-example.json\"),\n useCache=False,\n keyAttribute=\"entry\",\n uniqueAttributes=[\"rcsb_id\"],\n cacheKwargs=self.__testEntryCacheKwargs,\n objectLimit=self.__objectLimitTest,\n selectionList=[\"rcsb_id\", \"rcsb_accession_info\", \"struct\", \"exptl\"],\n )\n eCount = obEx.getCount()\n logger.info(\"Entry count is %d\", eCount)\n\n objD = obEx.getObjects()\n 
for _, obj in objD.items():\n rcsbId = obj[\"rcsb_id\"]\n logger.debug(\"%s rcsb_accession_info %s\", rcsbId, obj[\"rcsb_accession_info\"][\"initial_release_date\"])\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()\n\n def __extractEntityTaxonomyAndRefDetails(self, targetTaxIdL):\n \"\"\"Test case - extract unique entity source and host taxonomies\"\"\"\n try:\n # targetTaxIdL = [9606]\n obEx = ObjectExtractor(\n self.__cfgOb,\n databaseName=\"pdbx_core\",\n collectionName=\"pdbx_core_polymer_entity\",\n cacheFilePath=os.path.join(self.__workPath, \"entity-taxonomy-ref-example.json\"),\n useCache=False,\n keyAttribute=\"entity\",\n uniqueAttributes=[\"rcsb_id\"],\n cacheKwargs=self.__testEntryCacheKwargs,\n objectLimit=None,\n #\n selectionQuery={\"entity_poly.rcsb_entity_polymer_type\": \"Protein\", \"rcsb_entity_source_organism.ncbi_taxonomy_id\": {\"$in\": targetTaxIdL}},\n selectionList=[\n \"rcsb_id\",\n \"rcsb_entity_source_organism.ncbi_taxonomy_id\",\n \"rcsb_polymer_entity_container_identifiers.reference_sequence_identifiers\",\n \"rcsb_polymer_entity\",\n ],\n )\n eCount = obEx.getCount()\n logger.info(\"Polymer entity count is %d\", eCount)\n taxIdS = set()\n objD = obEx.getObjects()\n for _, eD in objD.items():\n rcsbId = eD[\"rcsb_id\"]\n try:\n for tD in eD[\"rcsb_entity_source_organism\"]:\n taxIdS.add(tD[\"ncbi_taxonomy_id\"])\n if not set(targetTaxIdL) & taxIdS:\n logger.error(\"%s missing any target taxId, has only %r\", rcsbId, taxIdS)\n except Exception:\n pass\n logger.info(\"Unique taxons %d\", len(taxIdS))\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()\n\n def __getEntryDetails(self):\n mU = MarshalUtil(workPath=self.__workPath)\n entryFilePath = os.path.join(self.__workPath, \"entry-data-details-example.json\")\n tD = mU.doImport(entryFilePath, fmt=\"json\")\n return tD[\"entry\"]\n\n def __getUniProtDetails(self):\n mU = MarshalUtil(workPath=self.__workPath)\n unpFilePath = os.path.join(self.__workPath, \"uniprot-data-details-example.json\")\n tD = mU.doImport(unpFilePath, fmt=\"json\")\n return tD[\"uniprot\"]\n\n def __getHostedTaxonomies(self, targetHostTaxId, unpD):\n hostedTaxIdL = []\n for _, uD in unpD.items():\n if \"host_taxonomy_id\" in uD and uD[\"host_taxonomy_id\"] == targetHostTaxId:\n hostedTaxIdL.append(uD[\"taxonomy_id\"])\n return list(set(hostedTaxIdL))\n\n def __getEntityDetails(self):\n mU = MarshalUtil(workPath=self.__workPath)\n entityFilePath = os.path.join(self.__workPath, \"entity-taxonomy-ref-example.json\")\n tD = mU.doImport(entityFilePath, fmt=\"json\")\n return tD[\"entity\"]\n\n def __getClusterDetails(self):\n ecdp = EntityClusterDataPrep(clusterPath=self.__clusterTopPath)\n return ecdp.fetch()\n\n def testfindFirst(self):\n for level in [\"100\", \"95\", \"90\"]:\n self.__testFindFirst(level)\n\n def __testFindFirst(self, level=\"95\"):\n \"\"\"Find first reference sequence in PDB\n\n Returns:\n [type]: [description]\n\n Example entity object:\n\n \"1ERU_1\": {\n \"rcsb_entity_source_organism\": [\n {\n \"ncbi_taxonomy_id\": 9606\n }\n ],\n \"rcsb_polymer_entity\": {\n \"details\": \"ACTIVE SITE CYSTEINES 32 AND 35 IN THE OXIDIZED FORM\",\n \"formula_weight\": 11.75,\n \"src_method\": \"man\",\n \"rcsb_multiple_source_flag\": \"N\",\n \"rcsb_source_part_count\": 1,\n \"rcsb_source_taxonomy_count\": 1,\n \"pdbx_description\": \"THIOREDOXIN\",\n \"pdbx_number_of_molecules\": 1,\n \"rcsb_macromolecular_names_combined\": [\n {\n \"name\": \"THIOREDOXIN\",\n 
\"provenance_code\": \"ECO:0000304\",\n \"provenance_source\": \"PDB Preferred Name\"\n }\n ]\n },\n \"rcsb_polymer_entity_container_identifiers\": {\n \"reference_sequence_identifiers\": [\n {\n \"database_name\": \"UniProt\",\n \"database_accession\": \"P10599\",\n \"provenance_source\": \"SIFTS\"\n }\n ]\n },\n \"rcsb_id\": \"1ERU_1\"\n },\n \"\"\"\n # level = \"95\"\n targetTaxId = 9606\n tU = TimeUtil()\n uniProtD = self.__getUniProtDetails()\n # hostedTaxIdL = self.__getHostedTaxonomies(targetTaxId, uniProtD)\n # logger.info(\"For %d host taxa count %d\", targetTaxId, len(hostedTaxIdL))\n #\n hostedTaxIdL = []\n self.__extractEntityTaxonomyAndRefDetails([targetTaxId])\n entityD = self.__getEntityDetails()\n entryD = self.__getEntryDetails()\n\n uniProtTaxIdD = {k: v[\"taxonomy_id\"] for k, v in uniProtD.items()}\n\n # tD = tU.getDateTimeObj(tS)\n # entities assigned to each ref sequence [uniprot]=[(entityId, releaseDT), ]\n refSeqD = {}\n # ref sequences assigned for each entity [entityId] = [uniprot,...]\n entityRefSeqD = {}\n entityTaxCountD = {}\n entityReleaseDateD = {}\n #\n multiTaxCount = 0\n multiTaxRefSeqCount = 0\n missCount = 0\n badAssignCount = 0\n #\n entriesPerYearMT = {}\n entriesPerYear = {}\n #\n for entityId, eObj in entityD.items():\n # logger.info(\"eObj %r\", eObj)\n # entityId = eObj[\"rcsb_id\"]\n entryId = entityId[:4]\n releaseTs = entryD[entryId][\"rcsb_accession_info\"][\"initial_release_date\"]\n releaseDt = tU.getDateTimeObj(releaseTs)\n releaseYear = int(releaseDt.strftime(\"%Y\"))\n # partCount = eObj[\"rcsb_polymer_entity\"][\"rcsb_source_part_count\"]\n taxCount = eObj[\"rcsb_polymer_entity\"][\"rcsb_source_taxonomy_count\"]\n entityTaxCountD[entityId] = taxCount\n #\n entriesPerYearMT.setdefault(releaseYear, set()).add(entryId)\n #\n if taxCount > 1:\n multiTaxCount += 1\n continue\n #\n entriesPerYear.setdefault(releaseYear, set()).add(entryId)\n ok = False\n try:\n rDL = eObj[\"rcsb_polymer_entity_container_identifiers\"][\"reference_sequence_identifiers\"]\n for rD in rDL:\n if rD[\"database_name\"] == \"UniProt\":\n uniProtId = rD[\"database_accession\"]\n if uniProtId in uniProtTaxIdD and uniProtTaxIdD[uniProtId] in [targetTaxId] + hostedTaxIdL:\n refSeqD.setdefault(uniProtId, []).append((entityId, releaseDt))\n entityRefSeqD.setdefault(entityId, []).append(uniProtId)\n ok = True\n else:\n badAssignCount += 1\n uTax = uniProtTaxIdD[uniProtId] if uniProtId in uniProtTaxIdD else None\n logger.debug(\"%s has bad uniprot assignment %s utaxId %r\", entityId, uniProtId, uTax)\n if ok and taxCount > 1:\n multiTaxRefSeqCount += 1\n except Exception:\n pass\n if not ok:\n missCount += 1\n logger.debug(\"Missing Uniprot reference for %r\", entityId)\n # only for items in the taxonomy scope\n entityReleaseDateD[entityId] = releaseDt\n #\n logger.info(\"Level %s\", level)\n logger.info(\"Total polymer entities %d\", len(entityD))\n logger.info(\"Length reference sequences assigned %d\", len(refSeqD))\n logger.info(\"multi-taxonomy entities %d\", multiTaxCount)\n logger.info(\"multi-taxonomy entities with assigned references sequences %d\", multiTaxRefSeqCount)\n logger.info(\"Unassigned polymer entities %d\", missCount)\n logger.info(\"Bad assignments (switch taxa) %d\", badAssignCount)\n #\n logger.debug(\"entriesPerYearMT %r\", entriesPerYearMT)\n logger.debug(\"entriesPerYear %r\", entriesPerYear)\n entriesPerYearMTCount = {}\n sumEntryMT = 0\n for yr in sorted(entriesPerYearMT):\n nn = len(entriesPerYearMT[yr])\n entriesPerYearMTCount[yr] = nn\n 
sumEntryMT += nn\n #\n entriesPerYearCount = {}\n entriesPerYearCountCum = {}\n sumEntry = 0\n for yr in sorted(entriesPerYear):\n nn = len(entriesPerYear[yr])\n entriesPerYearCount[yr] = nn\n sumEntry += nn\n entriesPerYearCountCum[yr] = sumEntry\n #\n logger.info(\"entriesPerYearCount (%d) %r\", sumEntry, entriesPerYearCount)\n logger.info(\"entriesPerYearCount (%d) %r\", sumEntry, entriesPerYearCountCum)\n logger.info(\"entriesPerYearMTCount (%d) %r\", sumEntryMT, entriesPerYearMTCount)\n #\n entriesPerYearCountRowL = []\n for yr, count in entriesPerYearCount.items():\n entriesPerYearCountRowL.append({\"Year\": yr, \"Entries_Containing_Human_Proteins\": count})\n #\n for uniProtId in refSeqD:\n refSeqD[uniProtId] = sorted(refSeqD[uniProtId], key=lambda item: item[1])\n for entityId in entityRefSeqD:\n entityRefSeqD[entityId] = list(set(entityRefSeqD[entityId]))\n #\n retRowFullL, retRowL, firstEntriesPerYearCountRowL = self.clusterFirstRep(level, entityRefSeqD, entityTaxCountD, entityReleaseDateD, entryD, entityD, uniProtD)\n #\n mU = MarshalUtil(workPath=self.__workPath)\n mU.doExport(os.path.join(self.__workPath, \"%s-first-human-entity-full.csv\" % level), retRowFullL, fmt=\"csv\")\n mU.doExport(os.path.join(self.__workPath, \"%s-first-human-entity-abbrev.csv\" % level), retRowL, fmt=\"csv\")\n #\n pth = os.path.join(self.__workPath, \"human-containing-entries-by-year.csv\")\n mU.doExport(pth, entriesPerYearCountRowL, fmt=\"csv\")\n pth = os.path.join(self.__workPath, \"human-containing-entries-by-year.md\")\n self.__formatTable(pth, entriesPerYearCountRowL)\n #\n pth = os.path.join(self.__workPath, \"%s-leading-human-containing-entries-by-year.csv\" % level)\n mU.doExport(os.path.join(self.__workPath, \"%s-leading-human-containing-entries-by-year.csv\" % level), firstEntriesPerYearCountRowL, fmt=\"csv\")\n pth = os.path.join(self.__workPath, \"%s-leading-human-containing-entries-by-year.md\" % level)\n self.__formatTable(pth, firstEntriesPerYearCountRowL)\n\n def __formatTable(self, pth, rowDL):\n tsL = []\n rowKeys = list(rowDL[0].keys())\n sepL = [\":-------:\" for n in range(len(rowKeys))]\n ln = \"| \" + \" | \".join(rowKeys) + \" |\"\n tsL.append(ln)\n ln = \"| \" + \" | \".join(sepL) + \" |\"\n tsL.append(ln)\n for rowD in rowDL:\n ln = \"| \" + \" | \".join([str(v) for v in rowD.values()]) + \" |\"\n tsL.append(ln)\n with open(pth, \"w\") as ofh:\n for ts in tsL:\n ofh.write(\"%s\\n\" % ts)\n\n def __getEntityAnnotations(self, entityId, entityD):\n descr = \"\"\n try:\n descr = entityD[entityId][\"rcsb_polymer_entity\"][\"pdbx_description\"]\n except Exception:\n pass\n return descr\n\n def __getEntryAnnotations(self, entryId, entryD):\n title = \"\"\n descr = \"\"\n try:\n title = entryD[entryId][\"struct\"][\"title\"]\n descr = entryD[entryId][\"struct\"][\"pdbx_descriptor\"]\n except Exception:\n pass\n return title, descr\n\n def __getUniProtAnnotations(self, upIdL, uniProtD, jChar=\"|\"):\n geneL = []\n nameL = []\n for upId in upIdL:\n try:\n for nD in uniProtD[upId][\"names\"]:\n if nD[\"nameType\"] == \"recommendedName\":\n nameL.append(nD[\"name\"])\n break\n except Exception:\n pass\n try:\n for gD in uniProtD[upId][\"gene\"]:\n if gD[\"type\"] == \"primary\":\n geneL.append(gD[\"name\"])\n break\n except Exception:\n pass\n return jChar.join(nameL), jChar.join(geneL)\n\n def clusterFirstRep(\n self,\n level,\n entityRefSeqD,\n entityTaxCountD,\n entityReleaseDateD,\n entryD,\n entityD,\n uniProtD,\n ):\n retRowL = []\n retRowFullL = []\n clD = 
self.__getClusterDetails()\n clusterD = clD[\"clusterD\"]\n firstEntriesPerYear = {}\n #\n # list of reference sequences assigned in this cluster\n clusterRefD = {}\n clusterMembersD = {}\n for clusterId, entityIdL in clusterD[level].items():\n for entityId in entityIdL:\n if entityId not in entityReleaseDateD:\n # skip if not in taxonomy scope\n continue\n if entityId in entityRefSeqD: # ref assigned\n rL = entityRefSeqD[entityId]\n clusterRefD.setdefault(clusterId, []).extend(rL)\n else: # ref unassigned\n rL = []\n clusterMembersD.setdefault(clusterId, []).append((entityId, entityReleaseDateD[entityId], rL))\n #\n uclusterRefD = {}\n for clusterId, rL in clusterRefD.items():\n uclusterRefD[clusterId] = list(set(rL))\n\n logger.info(\"Populated clusters at %s - %d\", level, len(clusterMembersD))\n for clusterId, memberList in clusterMembersD.items():\n clusterRefSeqCount = len(uclusterRefD[clusterId]) if clusterId in uclusterRefD else 0\n clusterRefSeqL = uclusterRefD[clusterId] if clusterId in uclusterRefD else []\n memberListS = sorted(memberList, key=lambda item: item[1])\n (firstEntityId, firstRelDT, firstRefL) = memberListS[0]\n firstReleaseYear = int(firstRelDT.strftime(\"%Y\"))\n #\n if clusterRefSeqCount:\n logger.debug(\"Cluster %-8s members %-5d (of %-5d) reference sequences %2d\", clusterId, len(memberList), len(clusterD[level][clusterId]), clusterRefSeqCount)\n # count the instances in the\n #\n refCount = len([True for m in memberList if firstRefL == m[2]])\n logger.debug(\">>> %5s (%d/%d) %s %r\", firstEntityId, entityTaxCountD[firstEntityId], refCount, firstRelDT.strftime(\"%Y-%m-%d\"), firstRefL)\n\n entryTitle, entryDescr = self.__getEntryAnnotations(firstEntityId[:4], entryD)\n entityDescr = self.__getEntityAnnotations(firstEntityId, entityD)\n uName, uGene = self.__getUniProtAnnotations(firstRefL, uniProtD)\n\n logger.debug(\"-------- %s \", entryTitle)\n logger.debug(\"-------- %s \", entryDescr)\n logger.debug(\"-------- %s \", entityDescr)\n logger.debug(\"-------- %s \", uName)\n logger.debug(\"-------- %s \", uGene)\n #\n recD = {\n \"Cluster_ID\": clusterId,\n \"Cluster_Members_Total\": len(clusterD[level][clusterId]),\n \"Cluster_Members_Human\": len(memberList),\n \"Cluster_UniProt_Count\": clusterRefSeqCount,\n \"PDB_Entity_ID\": firstEntityId,\n \"PDB_Release_Year\": int(firstRelDT.strftime(\"%Y\")),\n \"UniProt_IDs\": \",\".join(firstRefL),\n \"Assign_Count\": refCount,\n \"PDB_Struct_title\": entryTitle,\n \"PDB_Struct_Descr\": entryDescr,\n \"PDB_Entity_Descr\": entityDescr,\n \"UniProt_Name\": uName,\n \"Uniprot_Gene\": uGene,\n }\n retRowL.append(recD)\n retRowFullL.append(recD)\n firstEntriesPerYear.setdefault(firstReleaseYear, set()).add(firstEntityId[:4])\n #\n tL = copy.copy(clusterRefSeqL)\n try:\n for upId in firstRefL:\n tL.remove(upId)\n except ValueError:\n pass\n for (entityId, relDT, refL) in memberListS[1:]:\n for upId in refL:\n if upId in tL:\n refCount = len([True for m in memberList if m[2] == refL])\n logger.debug(\">>> >>> %5s (%d/%d) %s %r\", entityId, entityTaxCountD[entityId], refCount, relDT.strftime(\"%Y-%m-%d\"), refL)\n entryTitle, entryDescr = self.__getEntryAnnotations(entityId[:4], entryD)\n entityDescr = self.__getEntityAnnotations(entityId, entityD)\n uName, uGene = self.__getUniProtAnnotations(refL, uniProtD)\n #\n logger.debug(\"------------------- %s \", entryTitle)\n logger.debug(\"------------------- %s \", entryDescr)\n logger.debug(\"------------------- %s \", entityDescr)\n logger.debug(\"------------------- %s \", 
uName)\n                            logger.debug(\"------------------- %s \", uGene)\n                            # relYear = int(relDT.strftime(\"%Y\"))\n                            # firstEntriesPerYear.setdefault(relYear, set()).add(entityId[:4])\n                            recD = {\n                                \"Cluster_ID\": clusterId,\n                                \"Cluster_Members_Total\": len(clusterD[level][clusterId]),\n                                \"Cluster_Members_Human\": len(memberList),\n                                \"Cluster_UniProt_Count\": clusterRefSeqCount,\n                                \"PDB_Entity_ID\": entityId,\n                                \"PDB_Release_Year\": int(relDT.strftime(\"%Y\")),\n                                \"UniProt_IDs\": \",\".join(refL),\n                                \"Assign_Count\": refCount,\n                                \"PDB_Struct_title\": entryTitle,\n                                \"PDB_Struct_Descr\": entryDescr,\n                                \"PDB_Entity_Descr\": entityDescr,\n                                \"UniProt_Name\": uName,\n                                \"Uniprot_Gene\": uGene,\n                            }\n                            retRowFullL.append(recD)\n                            try:\n                                tL.remove(upId)\n                            except ValueError:\n                                pass\n            else:\n                # No reference sequences -\n                logger.debug(\"cluster %-8s members %-5d (of %-5d) no reference sequences\", clusterId, len(memberList), len(clusterD[level][clusterId]))\n                logger.debug(\">>> %5s (%d) %s\", firstEntityId, entityTaxCountD[firstEntityId], firstRelDT.strftime(\"%Y-%m-%d\"))\n                entryTitle, entryDescr = self.__getEntryAnnotations(firstEntityId[:4], entryD)\n                entityDescr = self.__getEntityAnnotations(firstEntityId, entityD)\n                logger.debug(\"-------- %s \", entryTitle)\n                logger.debug(\"-------- %s \", entryDescr)\n                logger.debug(\"-------- %s \", entityDescr)\n                firstEntriesPerYear.setdefault(firstReleaseYear, set()).add(firstEntityId[:4])\n                recD = {\n                    \"Cluster_ID\": clusterId,\n                    \"Cluster_Members_Total\": len(clusterD[level][clusterId]),\n                    \"Cluster_Members_Human\": len(memberList),\n                    \"Cluster_UniProt_Count\": clusterRefSeqCount,\n                    \"PDB_Entity_ID\": firstEntityId,\n                    \"PDB_Release_Year\": int(firstRelDT.strftime(\"%Y\")),\n                    \"UniProt_IDs\": \"\",\n                    \"Assign_Count\": 0,\n                    \"PDB_Struct_title\": entryTitle,\n                    \"PDB_Struct_Descr\": entryDescr,\n                    \"PDB_Entity_Descr\": entityDescr,\n                    \"UniProt_Name\": \"\",\n                    \"Uniprot_Gene\": \"\",\n                }\n                retRowL.append(recD)\n                retRowFullL.append(recD)\n        #\n        #\n        firstEntriesPerYearCount = {}\n        sumEntry = 0\n        for yr in sorted(firstEntriesPerYear):\n            nn = len(firstEntriesPerYear[yr])\n            firstEntriesPerYearCount[yr] = nn\n            sumEntry += nn\n        #\n        logger.info(\"%s firstEntriesPerYearCount (%d) %r\", level, sumEntry, firstEntriesPerYearCount)\n        firstEntriesPerYearCountRowL = []\n        for yr, count in firstEntriesPerYearCount.items():\n            firstEntriesPerYearCountRowL.append({\"Year\": yr, \"Entries_With_Leading_Human_Protein\": count})\n        # NOTE - ONLY THE LEADING CLUSTER MEMBERS in firstEntriesPerYearCountRowL...\n        return retRowFullL, retRowL, firstEntriesPerYearCountRowL\n\n\ndef leadingHumanProteinSuite():\n    suiteSelect = unittest.TestSuite()\n    suiteSelect.addTest(LeadingHumanProteinTests(\"testExtractUniProtDetails\"))\n    suiteSelect.addTest(LeadingHumanProteinTests(\"testExtractEntryDetails\"))\n    suiteSelect.addTest(LeadingHumanProteinTests(\"testfindFirst\"))\n    return suiteSelect\n\n\nif __name__ == \"__main__\":\n    mySuite = leadingHumanProteinSuite()\n    unittest.TextTestRunner(verbosity=2).run(mySuite)\n","sub_path":"rcsb/utils/tests-anal/testLeadingHumanProtein.py","file_name":"testLeadingHumanProtein.py","file_ext":"py","file_size_in_byte":26043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
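clusterFirstRep in the test above picks the leading member of each sequence cluster by sorting (entity, release-date) pairs and taking the earliest. A stripped-down sketch of that selection, with hypothetical entity IDs:

import datetime as dt

cluster_members = [
    ("2ABC_1", dt.date(2005, 3, 1)),
    ("1XYZ_1", dt.date(1999, 7, 4)),
    ("3DEF_2", dt.date(2011, 1, 20)),
]
# Sort by release date; the head of the sorted list is the first representative.
leader, first_release = sorted(cluster_members, key=lambda m: m[1])[0]
print(leader, first_release.year)  # 1XYZ_1 1999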
 +{"seq_id":"328372611","text":"\"\"\"\r\nHomework #2\r\n\"\"\"\r\n\r\n\"\"\"\r\nTask #1.\r\nWrite a function that checks whether the passed number or string\r\nis a palindrome and returns True. Otherwise it returns False.\r\nA palindrome is a number or a text that reads the same from the left\r\nand from the right: 939; 49094; 11311.\r\n\"\"\"\r\ndef polindrom(x):\r\n    if type(x) == int:\r\n        x = str(x)\r\n\r\n    if x == x[::-1]:\r\n        return True\r\n    else:\r\n        return False\r\n\r\n\"\"\"\r\nTask #2.\r\nWrite a function that takes the coordinates of a point (x, y)\r\nand returns the number of the quadrant this point belongs to.\r\n\"\"\"\r\n\r\n#We don't bother with type checking; we assume that numbers are passed in\r\n\r\ndef chetvert(x, y):\r\n\r\n    format_str = 'You entered coordinates of quadrant {}'\r\n    if x == 0 or y == 0:\r\n        return format_str.format(0)\r\n\r\n    if x > 0 and y >0:\r\n        return format_str.format(1)\r\n\r\n    if x < 0 and y <0:\r\n        return format_str.format(3)\r\n\r\n    if x > 0 and y <0:\r\n        return format_str.format(4)\r\n\r\n    return format_str.format(2)\r\n    \r\n\"\"\"\r\nTask #3.\r\nWrite a function that takes a list of integers,\r\nfor example - [1, 2, 3, 8, 14, 89, 45],\r\nsorts the list in ascending order with bubble sort and returns the resulting list.\r\n(!) Using the language's built-in sorting facilities is forbidden.\r\n* https://ru.wikipedia.org/wiki/Сортировка_пузырьком#.D..\r\n(!) Since ready-made code for this task is easy to find online,\r\nevery line of the code must be explained with a comment!\r\n\"\"\"\r\n#var1\r\n\r\ndef bubble_1(*lst): # declare a function with a variable number of arguments\r\n    lst = list(lst) # convert the received tuple into a list, so that elements can be swapped (a tuple is an immutable type)\r\n    for i in range(len(lst)): # build the range of indices from 0 to the last one, for a forward pass over all elements of the list,\r\n    # starting from 0 (on each iteration we get the index of a list element)\r\n        for j in range(len(lst) - 1, i, -1): # build the range of indices from the last one down to i, for a backward pass over the elements\r\n            if lst[j] < lst[j-1]: # if the next element is smaller than the previous one\r\n                lst[j], lst[j-1] = lst[j-1], lst[j] # swap the elements, using tuple assignment to variables\r\n    return lst # return the sorted list\r\n\r\n\r\n\r\n#var2\r\n# The variable n makes it possible to leave the while loop:\r\n# it lets us shrink, on every iteration of the loop, the number\r\n# of elements we look at (so we do not revisit the already sorted\r\n# last elements). Elements are swapped if the previous\r\n# element is greater than the next one.\r\ndef bubble_2(*args):\r\n    n = 1\r\n    args = list(args)\r\n    while n < len(args):\r\n        for i in range(len(args)-n):\r\n            if args[i] > args[i+1]:\r\n                args[i],args[i+1] = args[i+1],args[i]\r\n        n += 1\r\n\r\n    return args\r\n\r\n#var3\r\n\r\ndef bubble_3(*lst):\r\n    lst = list(lst) # the arguments arrive as a tuple; convert to a list so the swaps below work\r\n    k=0\r\n    n=1\r\n    while n<(len(lst)):\r\n        while k<(len(lst)-n):\r\n            if lst[k]>lst[k+1]:\r\n                lst[k],lst[k+1]=lst[k+1],lst[k]\r\n                print(lst) # to watch the \"evolution\" of the list :-)\r\n            k+=1\r\n        k=0\r\n        n+=1\r\n    return lst # return the sorted list, as the task requires\r\n","sub_path":"HomeWork/hw2.py","file_name":"hw2.py","file_ext":"py","file_size_in_byte":4735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
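A quick sanity check for the three bubble-sort variants above, assuming bubble_1, bubble_2 and bubble_3 are in scope: each takes the numbers as separate arguments and should agree with Python's built-in sorted().

import random

random.seed(1)                      # arbitrary seed, for a repeatable check
data = [random.randint(0, 99) for _ in range(10)]
assert bubble_1(*data) == sorted(data)
assert bubble_2(*data) == sorted(data)
assert bubble_3(*data) == sorted(data)  # also prints the intermediate states
print("all three bubble sort variants agree with sorted()")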
 +{"seq_id":"216009346","text":"'''\n@author Maifeng\n@file:mongo.py\n'''\n\n\n\n'''\n@author Maifeng\n\n'''\nimport pymongo\n'''\n\nmongodb\n'''\nclass Mymongo(object):\n    \"\"\"\n    docstring for ClassName\n    mongo_url: the URL used to connect to MongoDB\n    mongo_db: the name of the database\n    \"\"\"\n    def __init__(self, mongo_url, mongo_db):\n        super(Mymongo, self).__init__()\n        self.mongo_url = mongo_url\n        self.mongo_db = mongo_db\n\n    '''\n    Method that connects to the local MongoDB database\n\n    '''\n\n    def open_client(self):\n        self.client = pymongo.MongoClient(self.mongo_url)\n        self.db = self.client[self.mongo_db]\n\n    '''\n    Method that closes the database connection\n\n    '''\n\n    def close_client(self):\n        self.client.close()\n\n\n    def find_one(self, form, data):\n        if data:\n            data = self.db[form].find_one(data)\n            return data\n        else:\n            return None\n\n    def find(self, form, data):\n        if data:\n            results = []\n            datas = self.db[form].find(data)\n            for data in datas:\n                results.append(data)\n            return results\n        else:\n            return None\n\n    def update_one(self, form, condition, data):\n        if data:\n            result = self.db[form].update_one(condition, {'$set': data}, True)\n            return result.raw_result\n        else:\n            return None\n\n    def remove(self, form, data):\n        if data:\n            result = self.db[form].remove(data)\n            return result\n        else:\n            return None\n\nif __name__ == '__main__':\n    mongo = Mymongo('localhost','school_news')\n    mongo.open_client()\n    # result = mongo.find('news',{'date':'2018-06-01'})\n    # result = mongo.update_one('news', {'id':'123'}, {'id':'123','name':'liumeng'})\n\n\n    for month in range(12,0,-1):\n        if month < 10:\n            month = '0' + str(month)\n        for day in range(31,0,-1):\n            if day < 10:\n                day = '0' + str(day)\n            # print(day)\n            result = mongo.remove('news', {'date':'2017-{month}-{day}'.format(month=month, day=day)})\n            print(result)\n\n\n","sub_path":"tools/mongo.py","file_name":"mongo.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"351889836","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 8 09:55:25 2018\n\n@author: May.gloddemon@gmail.com\n\"\"\"\n\ndef collatz(number):\n    if number%2==0:\n        print(number//2)\n        return number//2 \n    elif number%2==1:\n        print(3*number+1)\n        return 3*number+1 \n    \nprint('please input a number:')\nwhile True:\n    try:\n        num=int(input())\n        result=collatz(num)\n        if result==1:\n            break\n        else:\n            continue\n    except ValueError:\n        print('please input a number, not a string!')","sub_path":"Collatz.py","file_name":"Collatz.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
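Mymongo.update_one above wraps pymongo's update_one with a $set document and upsert enabled. The same pattern used directly (this assumes a MongoDB server on localhost and reuses the database, collection and sample document from the script's __main__ block):

import pymongo

client = pymongo.MongoClient("localhost")
news = client["school_news"]["news"]
# $set changes only the listed fields; upsert=True inserts when nothing matches.
result = news.update_one({"id": "123"}, {"$set": {"name": "liumeng"}}, upsert=True)
print(result.raw_result)
client.close()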
 +{"seq_id":"278241588","text":"class Bird:\n    song='Squaawk'\n    def sing(self):\n        print(self.song)\n\nbird = Bird()\nbird.sing()\nbirdsong = bird.sing\nprint(birdsong())\n\nclass Calculator:\n    def calculate(self, expression):\n        self.value = eval(expression)\n\nclass Talker:\n    def talk(self):\n        print('Hi, my value is', self.value)\n\nclass TalkingCalculator(Calculator, Talker):\n    pass\n\ntc = TalkingCalculator()\ntc.calculate('1+2*3')\ntc.talk()\n\nprint(hasattr(tc, 'talk'))\nprint(callable(getattr(tc,'talk',None)))\n\nsetattr(tc,'name','Gumby')\nprint(tc.name)\n\n\n\n","sub_path":"Class_2.py","file_name":"Class_2.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"181781825","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 27 17:42:40 2019\n\n@author: sinsakuokazaki\n\"\"\"\n\"\"\"\ndef polindrome(st):\n    permutation(st, \"\")\n    for pre in prefixList:\n        preL = list(pre)\n        preFirst = preL[:len(preL) / 2]\n        preLast = preL[(len(preL) / 2) - 1:]\n        preLast.reverse()\n        result = []\n        for i in range(len(preL) / 2):\n            if preFirst[i] != preLast[i]:\n                pass\n            else: \n                continue\n        result.append(prefixList)\n        \n        \n        \n        if len(preL) % 2 == 0:\n        \n            for i in range(len(pre)/2):\n                if\n         \n        else:\n            for i in range(len(st) + 1 / 2):\n            \npreixList = []\ndef permutation(st, prefix):\n    if len(st) == 0:\n        preList.append(prefix)\n    for i in range(len(st)):\n        rem = st[:i] + st[i+1:]\n        permutation(rem, prefix + st[i])\n\"\"\"\n#Sample: to be a permutation of a palindrome, a string can have no more than one character with an odd count.\ndef isPermutationOfPalindrome(phrase):\n    table = buildCharFrequencyTable(phrase)\n    return checkMaxOneOdd(table)\n#Check that no more than one character has an odd count\ndef checkMaxOneOdd(table):\n    foundOdd = False\n    for k in table.keys():\n        if table[k] % 2 == 1:\n            if foundOdd:\n                return False\n            foundOdd = True\n    return True\n#Map each lowercase letter a-z to its character code.\n#Only lowercase letters are counted; other characters map to -1\ndef getCharNumber(c):\n    if ord(c) >= ord(\"a\") and ord(c) <= ord(\"z\"):\n        return ord(c)\n    return -1\n#Count how many times each character appears.\ndef buildCharFrequencyTable(phrase):\n    table = {}\n    for c in phrase:\n        num = getCharNumber(c)\n        if num != -1:\n            if c not in table.keys():\n                table[c] = 1\n            else: table[c] += 1\n    return table \n    \n","sub_path":"permutationOfPalindrome.py","file_name":"permutationOfPalindrome.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
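Usage of the finished palindrome-permutation helpers above (phrases made up for illustration): a phrase qualifies exactly when at most one letter has an odd count.

# Assumes isPermutationOfPalindrome from the snippet above is in scope.
print(isPermutationOfPalindrome("tact coa"))  # True  -> e.g. "taco cat"
print(isPermutationOfPalindrome("abc"))       # False -> three letters, each odd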
 +{"seq_id":"1616407","text":"#!/usr/bin/python\n\nimport sys\nclass parser():\n\t\n\t@classmethod\n\tdef pars(self, file_name):\n\t\tfrom xml.dom.minidom import parse\n\t\treturn parse(file_name)\n\n\t@classmethod\n\tdef indexation(self, st, st1, index):\n\t\tfor x in range(len(st)-1):\n\t\t\tres = st[x].split(\"[\")\n\t\t\tif len(res)==1:\n\t\t\t\tindex.append(0)\n\t\t\telif len(res)==2:\n\t\t\t\tst[x] = res[0]\n\t\t\t\tresn = res[1].split(\"]\")\n\t\t\t\tif len(resn)==2 and resn[1]==\"\":\n\t\t\t\t\tindex.append(resn[0])\n\t\t\t\telse:\n\t\t\t\t\tsys.exit(\"[\"+res[1]+\" not valid format\")\n\t\t\telse:\n\t\t\t\tsys.exit(\"incorrect number of \\\"[\\\" in \"+str(x)+\" argument\")\n\n\t\tres = st1[0].split(\"[\")\n\t\tif len(res)==1:\n\t\t\tindex.append(0)\n\t\telif len(res)==2:\n\t\t\tif len(st1)==1:\n\t\t\t\tst[len(st)-1] = res[0]\n\t\t\telse:\n\t\t\t\tst1[0] = res[0]\n\t\t\tresn = res[1].split(\"]\")\n\t\t\tif len(resn)==2 and resn[1]==\"\":\n\t\t\t\tindex.append(resn[0])\n\t\t\telse:\n\t\t\t\tsys.exit(\"[\"+res[1]+\" not valid format\")\n\t\telse:\n\t\t\tsys.exit(\"incorrect number of \\\"[\\\" in \"+str(len(st))+\" argument\")\n\n\t@classmethod\n\tdef text(self, xml, arr, index):\n\t\tfor i in range(len(arr)):\n\t\t\ttry:\n\t\t\t\txml = xml.getElementsByTagName(arr[i])[int(index[i])]\n\t\t\texcept IndexError:\n\t\t\t\tsys.exit(arr[i]+\"[\"+str(index[i])+\"]\"+\" not valid\")\n\t\t\texcept ValueError:\n\t\t\t\tsys.exit(str(index[i])+\" not valid format\")\n\t\ttry:\n\t\t\txml = xml.childNodes[0].nodeValue\n\t\texcept IndexError:\n\t\t\tsys.exit(arr[len(arr)-1]+\"[\"+str(index[len(arr)-1])+\"]\"+\" is empty\")\n\t\treturn xml\n\n\t@classmethod\n\tdef atribut(self, xml, arr1, arr2, index):\n\t\tfor j in range(len(arr1)-1):\n\t\t\ttry:\n\t\t\t\txml = xml.getElementsByTagName(arr1[j])[int(index[j])]\n\t\t\texcept IndexError:\n\t\t\t\tsys.exit(arr1[j]+\"[\"+str(index[j])+\"]\"+\" not valid\")\n\t\t\texcept ValueError:\n\t\t\t\tsys.exit(str(index[j])+\" not valid format\")\n\t\ttry:\n\t\t\txml = xml.getElementsByTagName(arr2[0])[int(index[len(arr1)-1])]\n\t\texcept IndexError:\n\t\t\tsys.exit(arr2[0]+\"[\"+str(index[len(arr1)-1])+\"]\"+\" not valid\")\n\t\texcept ValueError:\n\t\t\tsys.exit(str(index[len(arr1)-1])+\" not valid format\")\n\t\treturn xml.getAttribute(arr2[1])\n\n","sub_path":"json_xml/homework/Hrach/XML_parser/xml_parse.py","file_name":"xml_parse.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"19037155","text":"import time\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlopen\nfrom datastores.mongo.db import get_new\nfrom multiprocessing import Process\n\ninit_url = 'http://news.google.com'\nmongo_collection = 'murl'\n\n\ndef store_meta(soup, url, database):\n    try:\n        title = soup.find('title').text\n    except AttributeError:\n        title = \"no title\"\n\n    meta = soup.findAll('meta', 
attrs={\"name\": \"description\"})\n\n if not meta:\n desc = \"no description\"\n else:\n desc = meta[0].attrs.get('content')\n\n database.mongodb[mongo_collection].update_one(\n {'url': url},\n {'$set': {\n 'title': title,\n 'desc': desc\n }\n },\n upsert=False)\n\n\ndef store_links_in(soup, database):\n for link in soup.find_all('a'):\n href = link.get('href')\n if href and href.startswith('http'):\n domain_list = href.split('/')\n try:\n domain = domain_list[0] + '//' + domain_list[1] + domain_list[\n 2]\n except IndexError:\n print(href, domain_list)\n continue\n\n a = database.mongodb[mongo_collection].find_one({'url': domain})\n\n if a:\n database.mongodb[mongo_collection].update_one(\n {'_id': a['_id']},\n {'$inc': {\n 'hits': 1\n }\n },\n upsert=False)\n else:\n database.mongodb[mongo_collection].insert({'url': domain,\n 'hits': 1,\n \"parsed\": False})\n\n\ndef set_entry_parsed(url, database):\n database.mongodb[mongo_collection].update_one(\n {'url': url},\n {\"$set\": {\n \"parsed\": True\n }\n },\n upsert=False)\n\n\ndef my_process(url):\n database = get_new()\n try:\n soup = BeautifulSoup(urlopen(url), 'html.parser')\n except Exception:\n print(\"Really bad !\")\n return\n\n store_meta(soup, url, database)\n store_links_in(soup, database)\n\n\ndef run_engine(database):\n procs = []\n po = Process(target=clean_up, args=(procs, ))\n po.start()\n while True:\n not_parsed = database.mongodb[mongo_collection].find_one(\n {'parsed': False})\n if not not_parsed:\n continue\n set_entry_parsed(not_parsed['url'], database)\n\n p = Process(target=my_process, args=(not_parsed['url'],))\n procs.append(p)\n p.start()\n time.sleep(0.1)\n\n\ndef kick():\n database = get_new()\n\n database.mongodb[mongo_collection].create_index('parsed')\n my_process(init_url)\n\n run_engine(database)\n\n\ndef clean_up(procs):\n print(\"OK\")\n while True:\n for proc in procs:\n if not proc.is_alive():\n procs.remove(proc)\n print(len(procs))\n time.sleep(1)\n\n\nif __name__ == \"__main__\":\n kick()\n","sub_path":"multiprocessing/multiprocessing_main.py","file_name":"multiprocessing_main.py","file_ext":"py","file_size_in_byte":3019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"558448483","text":"import sys\nimport copy\nimport time\nimport math\n\n#read file\nwith open(sys.argv[1]) as nodeFile:\n fileContent = nodeFile.read()\n\n#split file by row\nfileRows = fileContent.split('\\n')\n#they're surprise tools that will help us later\ncityCount, roadCount = map(int, fileRows[0].split(' '))\ntargetCity = int(fileRows[len(fileRows)-2]) #the file ends with an empty row so skip past that\n\n#build graph object that can be parsed for minimum spanning tree\ngraph = []\nfor row in fileRows[1:roadCount+1]:\n rFrom, rTo, rHeight = map(int, row.split(' '))\n graph.append([rHeight, rFrom, rTo])\n\ngraph.sort(key=lambda x: x[0], reverse=True)\n\n#\ncities = []\nfor i in range(cityCount):\n cities.append([i+1])\n\n#build minimum spanning tree\nedges = []\nwhile len(edges) < cityCount-1:\n edge = graph.pop()\n groupA, groupB = [], []\n groupAindex, groupBindex = -1, -1\n for index, group in enumerate(cities):\n if edge[1] in group:\n groupAindex = index\n groupA = group\n if edge[2] in group:\n groupBindex = index\n groupB = group\n if groupAindex == groupBindex:\n continue\n groupA += cities.pop(groupBindex)\n edges.append(edge)\n\n#rebuild graph with spanning tree and create an empty height list\ngraph = {}\nhighests = []\nfor i in range(cityCount):\n graph[i+1] = 
[]\n highests.append(0)\nfor edge in edges:\n graph[edge[1]].append([edge[0], edge[2]])\n graph[edge[2]].append([edge[0], edge[1]])\n\n\npath = [1]\ncurrentNode = 1\npreviousNode = 0\nhighest = 0\n\n#build path\nwhile targetCity not in path:\n if len(graph[currentNode]) > 0:\n step = graph[currentNode].pop(-1)\n if step[1] in path:\n continue\n path.append(step[1])\n if step[0] > highest:\n highest = step[0]\n previousNode = currentNode\n currentNode = path[-1]\n highests[currentNode-1] = highest\n else:\n path.pop(-1)\n currentNode = path[-1]\n highest = highests[currentNode-1]\n\n\nprint(path, highest)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"424756372","text":"import numpy as np\nfrom numpy import einsum, asarray, eye\nfrom .MaterialBase import Material\nfrom Kuru.Tensor import trace, Voigt\n\n\n#####################################################################################################\n # NEARLY INCOMPRESSIBLE NEOHOOKEAN\n#####################################################################################################\n\n\nclass NeoHookean_2(Material):\n \"\"\"Material model for nearly incompressible neo-Hookean with the following internal energy:\n\n W(C) = mu/2*J**(-2/3)*(C:I-3) # for isochoric part\n U(J) = k/2*(J-1)**2 # for volumetric part\n\n \"\"\"\n\n def __init__(self, ndim, **kwargs):\n mtype = type(self).__name__\n super(NeoHookean_2, self).__init__(mtype, ndim, **kwargs)\n\n self.is_transversely_isotropic = False\n self.energy_type = \"internal_energy\"\n self.nature = \"nonlinear\"\n self.fields = \"mechanics\"\n\n if self.ndim==3:\n self.H_VoigtSize = 6\n elif self.ndim==2:\n self.H_VoigtSize = 3\n\n # LOW LEVEL DISPATCHER\n self.has_low_level_dispatcher = True\n\n def KineticMeasures(self,F, elem=0):\n from Kuru.MaterialLibrary.LLDispatch._NeoHookean_2_ import KineticMeasures\n return KineticMeasures(self,np.ascontiguousarray(F))\n\n\n def Hessian(self, StrainTensors, elem=0, gcounter=0):\n \"\"\"Hessian split into isochoroic and volumetric parts\"\"\"\n\n I = StrainTensors['I']\n b = StrainTensors['b'][gcounter]\n J = StrainTensors['J'][gcounter]\n\n mu = self.mu\n\n # ISOCHORIC\n H_Voigt = 2*mu*J**(-5./3.)*(1./9.*trace(b)*einsum('ij,kl',I,I) - \\\n 1./3.*(einsum('ij,kl',b,I) + einsum('ij,kl',I,b)) +\\\n 1./6.*trace(b)*(einsum('ik,jl',I,I) + einsum('il,jk',I,I)) )\n # VOLUMETRIC CHANGES\n if self.is_nearly_incompressible:\n H_Voigt += self.pressure*(einsum('ij,kl',I,I) - (einsum('ik,jl',I,I) + einsum('il,jk',I,I)))\n else:\n H_Voigt += self.kappa*((2.*J-1.)*einsum('ij,kl',I,I) - (J-1.)*(einsum('ik,jl',I,I) + einsum('il,jk',I,I)))\n\n H_Voigt = Voigt(H_Voigt,1)\n\n self.H_VoigtSize = H_Voigt.shape[0]\n\n return H_Voigt\n\n\n def CauchyStress(self,StrainTensors,elem=0,gcounter=0):\n\n I = StrainTensors['I']\n J = StrainTensors['J'][gcounter]\n b = StrainTensors['b'][gcounter]\n\n mu = self.mu\n\n # ISOCHORIC PART\n stress = mu*J**(-5./3.)*(b - 1./3.*trace(b)*I)\n # VOLUMETRIC PART\n if self.is_nearly_incompressible:\n stress += self.pressure*I\n else:\n stress += self.kappa*(J-1.)*I\n\n return stress\n","sub_path":"Kuru/MaterialLibrary/NeoHookean_2.py","file_name":"NeoHookean_2.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"60307503","text":"\"\"\"\r\nWAP that defines a function convert() that receives a 
string containing a sequence of whitespace separated words and returns a string after removing all duplicate words and sorting them alphanumerically\r\n\"\"\"\r\n\r\ndef convert(s):\r\n words = [word for word in s.split()]\r\n return ' '.join(sorted(list(set(words))))\r\n\r\ns = 'I felt happy because i saw the others were happy and because i knew i should feel happy, but i wasn\\'t really happy'\r\nt = convert(s)\r\nprint(t)\r\n\r\n# set() removes duplicate data automatically\r\n# list() converts the set into a list\r\n# sorted() sorts the list data and returns sorted list\r\n# sorted data list is converted to a string using join(), appending a space at the end of each word, except the last","sub_path":"Chapter-10-Functions/Examples/6-convert-string.py","file_name":"6-convert-string.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"358225720","text":"#!/usr/bin/env python3\n\nimport argparse\nimport sys,re,os\n\n# 55681:SOS vcpu 0-436 [000] .... 889251407412: acrn_ioreq_complete_request: ioreq[ffff88003e288000] complete, create ts[889251360652] [889251366716] [889251367458] [889251368012] [0] [0] [0]\ndef ioreq_hv_ts(filename):\n interval1 = 0\n interval2 = 0\n interval3 = 0\n interval4 = 0\n interval5 = 0\n interval6 = 0\n print(\"open file {}\".format(filename))\n pa = re.compile(r'\\[(\\d*)\\]')\n count = 0\n with open(filename) as f:\n for line in f.readlines():\n ts = pa.findall(line)\n if int(ts[-5]) == 0 or int(ts[-6]) == 0 or int(ts[-4]) == 0:\n continue\n count = count + 1\n #interval6 += int(ts[-1]) - int(ts[-2])\n #interval5 += int(ts[-2]) - int(ts[-3])\n interval4 += int(ts[-3]) - int(ts[-4])\n interval3 += (int(ts[-4]) - int(ts[-5]))\n interval2 += (int(ts[-5]) - int(ts[-6]))\n interval1 += (int(ts[-6]) - int(ts[-7]))\n print(\"interval1 {}\".format(interval1/count))\n print(\"interval2 {}\".format(interval2/count))\n print(\"interval3 {}\".format(interval3/count))\n print(\"interval4 {}\".format(interval4/count))\n print(\"count {}\".format(count))\n #print(\"interval5 {}\".format(interval5/count))\n #print(\"interval6 {}\".format(interval6/count))\n\ndef parse_init():\n parser = argparse.ArgumentParser()\n parser.add_argument('-f', '--logfile',\n required=True,\n help='log file')\n return parser\n\nif __name__ == \"__main__\":\n print(\"haha\")\n parser = parse_init()\n args = parser.parse_args()\n\n logfile = args.logfile\n\n ioreq_hv_ts(logfile)\n","sub_path":"misc/analyzer.py","file_name":"analyzer.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"16410584","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 25 17:29:13 2018\n\n@author: cheating\n\"\"\"\nimport pandas as pd\npd.core.common.is_list_like = pd.api.types.is_list_like\nfrom pandas_datareader import data\nimport fix_yahoo_finance as yf # yahoo專用的拿來拉股票資訊\nimport datetime\nimport talib #技術分析專用\nimport matplotlib.pyplot as plt # 繪圖專用\nimport mpl_finance as mpf # 專門用來畫蠟燭圖的\nimport pylab as pl # 讓圖片的文字可以旋轉\n\n###############################################################################\n# 股票機器人 技術面分析 #\n###############################################################################\n\ndef TheConstructor(userstock):\n # 設定要的資料時間\n start = datetime.datetime.now() - datetime.timedelta(days=365) #先設定要爬的時間\n end = datetime.date.today()\n \n # 與yahoo請求\n pd.core.common.is_list_like = pd.api.types.is_list_like\n 
yf.pdr_override()\n    \n    # 取得股票資料\n    stock = data.get_data_yahoo(userstock+'.TW', start, end)\n    return stock\n\n\n#---------------------------------------- KD指標 ------------------------------------\ndef stock_KD(userstock):\n    \n    stock=TheConstructor(userstock)\n    \n    ret = pd.DataFrame(list(talib.STOCH(stock['High'].values, stock['Low'].values, stock['Close'].values))).transpose()\n    ret.columns=['K','D']\n    ret.index = stock['Close'].index\n    if ret.iloc[len(ret)-1]['K'] > ret.iloc[len(ret)-1]['D']:\n        return 'K朝上,可買進'\n    else:\n        return 'K朝下,請三思'\n#-------------------------------- 移動平均線(Moving Average)------------------------------------\ndef stock_MA(userstock):\n    stock=TheConstructor(userstock)\n    \n    ret = pd.DataFrame(talib.SMA(stock['Close'].values,10), columns= ['10-day average']) #10日移動平均線\n    ret = pd.concat([ret,pd.DataFrame(talib.SMA(stock['Close'].values,20), columns= ['20-day average'])], axis=1) #20日移動平均線\n    ret = pd.concat([ret,pd.DataFrame(talib.SMA(stock['Close'].values,60), columns= ['60-day average'])], axis=1) #60日移動平均線\n    ret = ret.set_index(stock['Close'].index.values)\n    \n    if ret.iloc[len(ret)-1]['10-day average'] > ret.iloc[len(ret)-1]['60-day average']:\n        return '短期線突破長期線,可買進'\n    else:\n        return '短期線跌破長期線,請三思'\n#-------------------- 指數平滑異同移動平均線(Moving Average Convergence / Divergence)------------------------------------\ndef stock_MACD(userstock):\n    stock=TheConstructor(userstock)\n    ret=pd.DataFrame()\n    ret['MACD'],ret['MACDsignal'],ret['MACDhist'] = talib.MACD(stock['Close'].values,fastperiod=6, slowperiod=12, signalperiod=9)\n    ret = ret.set_index(stock['Close'].index.values)\n    \n    if ret.iloc[len(ret)-1]['MACD'] > ret.iloc[len(ret)-1]['MACDsignal']:\n        return '短期線突破長期線,可買進'\n    else:\n        return '短期線跌破長期線,請三思'\n#------------------------ 能量潮指標(On Balance Volume)------------------------------------\ndef stock_OBV(userstock):\n    stock=TheConstructor(userstock)\n    ret = pd.DataFrame(talib.OBV(stock['Close'].values, stock['Volume'].values.astype(float)), columns= ['OBV'])\n    ret = ret.set_index(stock['Close'].index.values)\n    \n    if ret.iloc[len(ret)-1]['OBV'] > ret.iloc[len(ret)-2]['OBV']:\n        return 'OBV向上'\n    else:\n        return 'OBV向下'\n#------------------------ 威廉指數(Williams Overbought)------------------------------------\ndef stock_William(userstock):\n    stock=TheConstructor(userstock)\n    ret = pd.DataFrame(talib.WILLR(stock['High'].values, stock['Low'].values, stock['Close'].values), columns= ['Williams'])\n    ret = ret.set_index(stock['Close'].index.values)\n\n    if ret.iloc[len(ret)-1]['Williams'] > -20:\n        return '威廉指數表示,買進'\n    elif ret.iloc[len(ret)-1]['Williams'] < -80:\n        return '威廉指數表示,賣出'\n    else:\n        return '不動作'\n#------------------------ 平均真實區域指標(Average True Range)------------------------------------\ndef stock_ATR(userstock):\n    stock=TheConstructor(userstock)\n    ret = pd.DataFrame(talib.ATR(stock['High'].values, stock['Low'].values, stock['Close'].values), columns= ['Average True Range'])\n    ret = ret.set_index(stock['Close'].index.values)\n    \n    if ret.iloc[len(ret)-1]['Average True Range'] > 0.8:\n        return '波動極大'\n    else:\n        return '微幅震動'\n#------------------------ 平均趨向指標(Average Directional Indicator)------------------------------------\ndef stock_ADX(userstock):\n    stock=TheConstructor(userstock)\n    ret = pd.DataFrame(talib.ADX(stock['High'].values, stock['Low'].values, stock['Close'].values), columns= ['Average True Range'])\n    ret = ret.set_index(stock['Close'].index.values)    \n    \n    if ret.iloc[len(ret)-1]['Average True Range'] > ret.iloc[len(ret)-2]['Average True Range']:\n        return '能量向上'\n    else:\n        
return '能量衰減'\n#------------------------ 相對強弱指數(Relative Strength Index)------------------------------------\ndef stock_RSI(userstock):\n stock=TheConstructor(userstock)\n # RSI的天數設定一般是6, 12, 24\n ret = pd.DataFrame(talib.RSI(stock['Close'].values,24), columns= ['Relative Strength Index'])\n ret = ret.set_index(stock['Close'].index.values)\n \n if ret.iloc[len(ret)-1]['Relative Strength Index'] > 50:\n return '強勢'\n else:\n return '弱勢'\n#------------------------ 資金流動指標(Money Flow Index)------------------------------------\ndef stock_MFI(userstock):\n stock=TheConstructor(userstock)\n ret = pd.DataFrame(talib.MFI(stock['High'].values,stock['Low'].values,stock['Close'].values,stock['Volume'].values.astype(float), timeperiod=14), columns= ['Money Flow Index'])\n ret = ret.set_index(stock['Close'].index.values)\n\n if ret.iloc[len(ret)-1]['Money Flow Index'] > 50:\n return '強勢'\n else:\n return '弱勢'\n#------------ 接收者操作特徵曲線(Receiver Operating Characteristic Curve)------------------------------------\ndef stock_ROC(userstock):\n stock=TheConstructor(userstock)\n ret = pd.DataFrame(talib.ROC(stock['Close'].values, timeperiod=10), columns= ['Receiver Operating Characteristic curve'])\n ret = ret.set_index(stock['Close'].index.values)\n \n if ret.iloc[len(ret)-1]['Receiver Operating Characteristic curve'] > ret.iloc[len(ret)-2]['Receiver Operating Characteristic curve'] and ret.iloc[len(ret)-1]['Receiver Operating Characteristic curve'] >0:\n return '強勢,可買進'\n elif ret.iloc[len(ret)-1]['Receiver Operating Characteristic curve'] < ret.iloc[len(ret)-2]['Receiver Operating Characteristic curve'] and ret.iloc[len(ret)-1]['Receiver Operating Characteristic curve'] <0:\n return '弱勢,要出場'\n else:\n return '沒有特別操作'","sub_path":"Technical_Analysis.py","file_name":"Technical_Analysis.py","file_ext":"py","file_size_in_byte":7018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"493819005","text":"# Copyright (C) 2010-2015 Cuckoo Foundation.\n# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org\n# This signature was contributed by RedSocks - http://redsocks.nl\n# See the file 'docs/LICENSE' for copying permission.\n\nfrom lib.cuckoo.common.abstracts import Signature\n\nclass Dyreza(Signature):\n name = \"dyreza\"\n description = \"Creates known Dyreza Banking Trojan files, registry keys and/or mutexes\"\n severity = 3\n categories = [\"banking\"]\n families = [\"dyreza\"]\n authors = [\"RedSocks\"]\n minimum = \"2.0\"\n\n mutexes_re = [\n \".*Rangismutex5\",\n \".*Diper89\",\n \".*Xider78\",\n \".*zx5fwtw4ep\",\n ]\n\n files_re = [\n \".*Temp.*fax.*scr\",\n \".*Temp.*mmo.*txt\",\n \".*tubeini.*exe\",\n \".*mfcsubs.dll\",\n \".*Temp.*mscodecs.exe\",\n \".*system32.*Duser.*dll\",\n ]\n\n def on_complete(self):\n for indicator in self.mutexes_re:\n mutex = self.check_mutex(pattern=indicator, regex=True)\n if mutex:\n self.mark_ioc(\"mutex\", mutex)\n\n for indicator in self.files_re:\n regkey = self.check_file(pattern=indicator, regex=True)\n if regkey:\n self.mark_ioc(\"file\", regkey)\n\n return self.has_marks()\n","sub_path":"signatures/windows/vir_dyreza.py","file_name":"vir_dyreza.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"510642716","text":"from django.urls import path\nfrom rest_framework.routers import SimpleRouter\nfrom rest_framework_simplejwt.views import (\n TokenObtainPairView, TokenRefreshView, TokenVerifyView,\n)\n\nfrom 
.views import RegistrationViewSet\n\n\nrouter = SimpleRouter()\nrouter.register(\n 'registration',\n RegistrationViewSet,\n basename='registration',\n)\n\nurlpatterns = [\n path(\n 'token/verify/',\n TokenVerifyView.as_view(),\n name='token_verify',\n ),\n path(\n 'token/',\n TokenObtainPairView.as_view(),\n name='token_obtain_pair',\n ),\n path(\n 'token/refresh/',\n TokenRefreshView.as_view(),\n name='token_refresh',\n ),\n]\n\nurlpatterns += router.urls\n","sub_path":"core/users/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"607004744","text":"import math\ndef main():\n\tprimes = []\n\tnum = 0\n\twhile(len(primes)!=10001):\n\t\t# for x in primes:\n\t\t# \tif(num%x==0):\n\t\t# \t\tnum = num + 1\n\t\t# \t\tbreak\n\t\t# \telif(x==primes[len(primes)-1]):\n\t\t# \t\tif(num%x!=0):\n\t\t# \t\t\tprimes.append(num)\n\t\t# \t\tnum = num + 1\n\t\t# \t\tbreak\n\t\tif isPrime(num):\n\t\t\tprimes.append(num)\n\t\tnum = num + 1\n\tprint(primes[10000])\n\ndef isPrime(n):\n\tif n <= 1:\n\t\treturn False\n\telif n == 2:\n\t\treturn True\n\tcheck = math.sqrt(n)\n\tfor x in range (2,int(check)+1):\n\t\tif n%x == 0:\n\t\t\treturn False\n\treturn True\nmain()","sub_path":"q7/q7.py","file_name":"q7.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"55355720","text":"import random\ndef getInitCentroidPoint(k, data):\n clusters = []\n for i in range(k):\n randIndex = random.randint(0,len(data) - 1)\n clusters.append([data[randIndex]])\n data.pop(randIndex)\n return {\n \"data\": data,\n \"clusters\": clusters\n }","sub_path":"centroid.py","file_name":"centroid.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"584236725","text":"#!/usr/bin/python3\nfrom sys import *\nfrom os import *\ndef generationSuivante(liste):\n\tout = []\n\tout.append(liste[0]-1)\n\tout.append(liste[0])\n\tfor i in range(len(liste)-1):\n\t\tout.append((liste[i]+liste[i+1])/2)\n\t\tout.append(liste[i+1])\n\tout.append(liste[len(liste)-1]+1)\n\treturn out\n\n\t\ndef arbre(n):\n\tout = [0]\n\tfor i in range(n):\n\t\tout = generationSuivante(out)\n\treturn out\t\n\t\t\ndef arbreDiad(n):\n\tout = []\n\tfor el in arbre(n):\n\t\tout.append(diad(el))\n\treturn out\ndef diad(n):\n\ti = 0\n\twhile n != int(n):\n\t\tn *= 2\n\t\ti += 1\n\tif i == 0:\n\t\treturn(str(n))\n\telse:\n\t\treturn (str(int(n))+\"/\"+str(2**i))\t\t\n\n\nn = int(argv[1])\nprint(arbreDiad(n))\n#arbre = arbre(int(n))\n#print(arbre)\n#print(len(arbre))\n","sub_path":"JeuxEtStrat/chaudFroid.py","file_name":"chaudFroid.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"213539830","text":"def palindrome():\n name = input(\"Please enter your first name: \")\n cp = name.lower()\n name = list(name.lower())\n rev = []\n \n while name:\n rev.append(name.pop())\n reverse = ''.join(rev)\n reverse_final = reverse[0].upper() + reverse[1:]\n print(reverse_final)\n \n \n if reverse == cp:\n print(\"Palindrome!\")\n \npalindrome()","sub_path":"GraceLinREPO/SUBMISSIONS/week_02/palindrome.py","file_name":"palindrome.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} 
+{"seq_id":"63229009","text":"from flask import Flask, render_template, request, redirect\nfrom flask import Blueprint\nfrom werkzeug.utils import redirect\nfrom repositories import task_repository, user_repository\nfrom models.task import Task\n\ntasks_blueprint = Blueprint(\"tasks\", __name__)\n\n@tasks_blueprint.route(\"/tasks\")\ndef tasks():\n tasks = task_repository.select_all()\n return render_template(\"tasks/index.html\", all_tasks = tasks)\n\n# NEW\n# GET '/tasks/new'\n@tasks_blueprint.route(\"/tasks/new\")\ndef new_task():\n users = user_repository.select_all()\n # return \"I'm the new route\"\n # simple string is to check that the route is working\n return render_template(\"/tasks/new.html\", all_users = users)\n\n# CREATE\n# POST '/tasks'\n@tasks_blueprint.route(\"/tasks\", methods = [\"POST\"])\ndef create_task():\n description = request.form[\"description\"]\n duration = request.form[\"duration\"]\n completed = request.form[\"completed\"]\n user_id = request.form[\"user_id\"]\n user = user_repository.select(user_id)\n new_task = Task(description, user, duration, completed)\n task_repository.save(new_task)\n return redirect(\"/tasks\")\n\n# SHOW\n# GET 'tasks/'\n@tasks_blueprint.route(\"/tasks/\", methods = [\"GET\"])\ndef show_task(id):\n task = task_repository.select(id)\n return render_template(\"tasks/show.html\", task = task)\n\n# EDIT\n# GET '/tasks//edit\n@tasks_blueprint.route(\"/tasks//edit\", methods = [\"GET\"])\ndef edit_task(id):\n task = task_repository.select(id)\n users = user_repository.select_all()\n return render_template(\"tasks/edit.html\", task = task, all_users = users)\n\n# UPDATE\n# POST 'tasks/'\n@tasks_blueprint.route(\"/tasks/\", methods = [\"POST\"])\ndef update_task(id):\n description = request.form['description']\n user_id = request.form['user_id']\n duration = request.form['duration']\n completed = request.form['completed']\n user = user_repository.select(user_id)\n task = Task(description, user, duration, completed, id)\n task_repository.update(task)\n return redirect('/tasks')\n\n# DELETE\n# DELETE 'tasks/'\n@tasks_blueprint.route(\"/tasks//delete\", methods = [\"POST\"])\ndef delete_task(id):\n task_repository.delete(id)\n return redirect(\"/tasks\")","sub_path":"controllers/tasks_controller.py","file_name":"tasks_controller.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"591067115","text":"class Node:\n def __init__(self, key, value):\n self.key = key\n self.value = value\n self.prev = None\n self.next = None\n\n def get_next(self):\n return self.next\n \n def get_prev(self):\n return self.prev\n \n def get_key(self):\n return self.key\n \n def get_value(self):\n return self.value\n \n def set_key(self, new_key):\n self.key = new_key\n \n def set_value(self, new_value):\n self.value = new_value\n \n def set_next(self, new_next):\n self.next = new_next\n \n def set_prev(self, new_prev):\n self.prev = new_prev\n\n\nclass LRUCache:\n \"\"\"\n Our LRUCache class keeps track of the max number of nodes it\n can hold, the current number of nodes it is holding, a doubly-\n linked list that holds the key-value entries in the correct\n order, as well as a storage dict that provides fast access\n to every node stored in the cache.\n \"\"\"\n def __init__(self, limit=10):\n self.limit = limit\n self.size = 0\n self.head = None\n self.tail = None\n self.storage_dict = {}\n \n \"\"\"\n Retrieves the value associated with the given key. 
Also\n needs to move the key-value pair to the end of the order\n such that the pair is considered most-recently used.\n Returns the value associated with the key or None if the\n key-value pair doesn't exist in the cache.\n \"\"\"\n def get(self, key):\n if key not in self.storage_dict:\n return None\n\n node = self.storage_dict[key]\n self.move_to_front(node)\n return node.get_value()\n\n \"\"\"\n Adds the given key-value pair to the cache. The newly-\n added pair should be considered the most-recently used\n entry in the cache. If the cache is already at max capacity\n before this entry is added, then the oldest entry in the\n cache needs to be removed to make room. Additionally, in the\n case that the key already exists in the cache, we simply\n want to overwrite the old value associated with the key with\n the newly-specified value.\n \"\"\"\n def set(self, key, value):\n if key in self.storage_dict:\n node = self.storage_dict[key]\n node.set_value(value)\n self.storage_dict[key] = node\n self.move_to_front(node)\n return\n\n if self.size >= self.limit:\n del self.storage_dict[self.tail.get_key()]\n self.remove_from_tail()\n \n new_node = Node(key, value)\n self.storage_dict[key] = new_node\n self.add_to_head(new_node)\n \n ######### Helper Methods #########\n \n def isEmpty(self):\n return self.size == 0\n\n def __str__(self):\n list_str = \"\"\n curr = self.head\n while curr is not None:\n if curr is self.head:\n list_str += f\"{curr.get_value()}\"\n else:\n list_str += f\" -> {curr.get_value()}\"\n curr = curr.get_next()\n return f\"Size = {self.size}, {{{list_str}}}\\n\"\n\n def delete(self, node):\n if node is self.head:\n self.remove_from_head()\n elif node is self.tail:\n self.remove_from_tail()\n else:\n self.size -= 1\n prev_node = node.get_prev()\n next_node = node.get_next()\n prev_node.set_next(next_node)\n next_node.set_prev(prev_node)\n \n def add_to_head(self, node):\n self.size += 1\n node.set_prev(None)\n node.set_next(self.head)\n if self.head:\n self.head.set_prev(node)\n else:\n self.tail = node\n self.head = node\n \n def remove_from_head(self):\n if self.isEmpty():\n return None\n \n self.size -= 1\n removed_value = self.head.get_value()\n self.head = self.head.get_next()\n if self.head:\n self.head.set_prev(None)\n else:\n self.tail = None\n return removed_value\n\n def remove_from_tail(self):\n if self.isEmpty():\n return None\n \n self.size -= 1\n removed_value = self.tail.get_value()\n self.tail = self.tail.get_prev()\n if self.tail:\n self.tail.set_next(None)\n else:\n self.head = None\n return removed_value\n \n def move_to_front(self, node):\n if node is not self.head:\n self.delete(node)\n self.add_to_head(node)","sub_path":"lru_cache/lru_cache.py","file_name":"lru_cache.py","file_ext":"py","file_size_in_byte":4480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"184957702","text":"from random import random\nfrom math import pi\nimport numpy as np\nfrom scipy.integrate import odeint\n\nclass cpg(object):\n\tdef __init__(self,num,W,b,T,tau,x0):\n\t\tself.num = num\n\t\tself.b = b\n\t\tself.T = T\n\t\tself.tau = tau\n\t\tself.W = np.array(W)\n\t\tself.x = np.array(x0)\n\t\t\n\tdef __call__(self,dt,s,devide=1000):\n\t\tt = np.linspace(0,dt,devide+1)\n\t\tself.x = odeint(self.func,self.x,t,args=(s,))[-1]\n\t\treturn self.output()\n\n\tdef output(self):\n\t\treturn np.maximum(0,self.x[:self.num])\n\n\tdef func(self,vector,t,s):\n\t\tx0,x1 = np.hsplit(vector,2)\n\t\ty = self.output()\n\t\treturn 
self.tau*np.r_[-x0-y.dot(self.W)+s-self.b*x1,(-x1+y)/self.T]\n\n\ndef run_cpg(W1,W2,b,T,tau):\n\tneuronum=2\n\tW=[[0,W1],[W2,0]]#W=[[0+change[1],1.083+change[2]],[1.083+change[3],0+change[4]]] \n\t#b=1.35+change[5]\n\t#T=12+change[6]\n\t#tau=3.125+change[7]\n\tx0=[0,0.96,0,0]#x0 = [0+change[8],.96+change[9],0+change[10],0+change[11]]\n\tc = cpg(2,W,b,T,tau,x0)\n\ts=[1.7,1.7]#s = [1.7+change[12],1.7+change[13]]\n\t\n\tt = list(np.linspace(0,2*(pi),10000))\n\tOP=[]\n\tfor t1,t2 in zip(t[:-1],t[1:]):\n\t\tdt = t2-t1\n\t\ty = c(dt,s)\n\t\tOP.append(y[0]-y[1])\n\t\t#print(y[0]-y[1])\n\treturn OP\n\n\t\t\nif __name__ == '__main__':\n\trun_cpg(W1=1.083,W2=1.083,b=1.35,T=12,tau=3.125)","sub_path":"Code/cpg.py","file_name":"cpg.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"321624363","text":"from pylab import *\n\n# R,Q = vgg_rq(S)  Just like qr but the other way around.\n#\n# If R,Q = vgg_rq(X), then R is upper-triangular, Q is orthogonal, and X==R*Q.\n# Moreover, if S is a real matrix, then det(Q)>0.\n\n# By awf\n\ndef vgg_rq(S):\n    \n    S = S.T\n    Q, U = qr(fliplr(flipud(S)))\n    Q = fliplr(flipud(Q.T))\n    U = fliplr(flipud(U.T))\n    \n    if det(Q) < 0:\n        # flip signs so that det(Q) > 0 while the product U*Q is unchanged\n        U[:,0] = -U[:,0]\n        Q[0,:] = -Q[0,:]\n    \n    return U, Q","sub_path":"exercise6/exercise6/Python/vgg_rq.py","file_name":"vgg_rq.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"71092225","text":"import sys\n\ndef _repr(obj):\n    \"\"\"\n    Get the representation of an object, with dedicated pprint-like format for lists.\n    \"\"\"\n    if isinstance(obj, list):\n        return '[' + (',\\n '.join((_repr(e).replace('\\n', '\\n  ') for e in obj))) + '\\n]'\n    else:\n        return repr(obj)\n\nclass Node(object):\n    \"\"\"\n    Base class example for the AST nodes.\n    By default, instances of classes have a dictionary for attribute storage.\n    This wastes space for objects having very few instance variables.\n    The space consumption can become acute when creating large numbers of instances.\n    The default can be overridden by defining __slots__ in a class definition.\n    The __slots__ declaration takes a sequence of instance variables and reserves\n    just enough space in each instance to hold a value for each variable.\n    Space is saved because __dict__ is not created for each instance.\n    \"\"\"\n    __slots__ = ()\n\n    def __repr__(self):\n        \"\"\" Generates a python representation of the current node\n        \"\"\"\n        result = self.__class__.__name__ + '('\n        indent = ''\n        separator = ''\n        for name in self.__slots__[:-1]:\n            result += separator\n            result += indent\n            result += name + '=' + (_repr(getattr(self, name)).replace('\\n', '\\n  ' + (' ' * (len(name) + len(self.__class__.__name__)))))\n            separator = ','\n            indent = '\\n ' + (' ' * len(self.__class__.__name__))\n        result += ')'\n        return result\n\n    def children(self):\n        \"\"\" A sequence of all children that are Nodes\n        \"\"\"\n        pass\n\n    def show(self, buf=sys.stdout, offset=0, attrnames=False, nodenames=False, showcoord=False, _my_node_name=None):\n        \"\"\" Pretty print the Node and all its attributes and children (recursively) to a buffer.\n        buf:\n            Open IO buffer into which the Node is printed.\n        offset:\n            Initial offset (amount of leading spaces)\n        attrnames:\n            True if you want to see the attribute names in name=value pairs. 
False to only see the values.\n nodenames:\n True if you want to see the actual node names within their parents.\n showcoord:\n Do you want the coordinates of each Node to be displayed.\n \"\"\"\n lead = ' ' * offset\n if nodenames and _my_node_name is not None:\n buf.write(lead + self.__class__.__name__+ ' <' + _my_node_name + '>: ')\n else:\n buf.write(lead + self.__class__.__name__+ ': ')\n\n if self.attr_names:\n if attrnames:\n nvlist = [(n, getattr(self, n)) for n in self.attr_names if getattr(self, n) is not None]\n attrstr = ', '.join('%s=%s' % nv for nv in nvlist)\n else:\n vlist = [getattr(self, n) for n in self.attr_names]\n attrstr = ', '.join('%s' % v for v in vlist)\n buf.write(attrstr)\n\n if showcoord:\n if self.coord:\n buf.write('%s' % self.coord)\n buf.write('\\n')\n for (child_name, child) in self.children():\n #child.show(buf, offset + 4, attrnames, nodenames, showcoord, child_name)\n child.show(\n buf,\n offset=offset + 4,\n attrnames=attrnames,\n nodenames=nodenames,\n showcoord=showcoord,\n _my_node_name=child_name)\n\nclass Program(Node):\n __slots__ = ('gdecls','symtab', 'coord')\n\n def __init__(self, gdecls, symtab=None, coord=None):\n self.gdecls = gdecls\n self.symtab = None\n self.coord = coord\n\n def children(self):\n nodelist = []\n for i, child in enumerate(self.gdecls or []):\n nodelist.append((\"gdecls[%d]\" % i, child))\n return tuple(nodelist)\n \n def __iter__(self):\n for i in (self.gdecls or []):\n yield i\n\n attr_names = ()\n\n\nclass Coord(object):\n \"\"\" Coordinates of a syntactic element. Consists of:\n - Line number\n - (optional) column number, for the Lexer\n \"\"\"\n __slots__ = ('line', 'column')\n def __init__(self, line, column=None):\n self.line = line\n self.column = column\n\n def __str__(self):\n if self.line:\n coord_str = \" @ %s:%s\" % (self.line, self.column)\n else:\n coord_str = \"\"\n return coord_str\n\n\nclass Constant(Node):\n __slots__ = ('type', 'value', 'coord', 'rawtype', 'gen_location')\n def __init__(self, type, value, coord=None):\n self.type = type\n self.value = value\n self.coord = coord\n self.rawtype = type \n self.gen_location = None\n\n def children(self):\n nodelist = []\n return tuple(nodelist)\n\n def __iter__(self):\n return\n\n attr_names = ('type', 'value', )\n\n\nclass Cast(Node):\n __slots__ = ('cast', 'expression', 'coord', 'type', 'gen_location')\n def __init__(self, cast, expression, coord=None):\n self.cast = cast\n self.expression = expression\n self.coord = coord\n self.type = None\n self.gen_location = None\n\n def children(self):\n nodelist = []\n if self.cast is not None: \n nodelist.append((\"cast\", self.cast))\n if self.expression is not None: \n nodelist.append((\"expression\", self.expression))\n return tuple(nodelist)\n\n def __iter__(self):\n if self.cast is not None:\n yield self.cast\n if self.expression is not None:\n yield self.expression\n\n attr_names = ()\n\n\nclass Type(Node):\n __slots__ = ('names', 'coord')\n\n def __init__(self, names, coord=None):\n self.names = names\n self.coord = coord\n\n def children(self):\n nodelist = []\n return tuple(nodelist)\n\n def __iter__(self):\n return\n\n attr_names = ('names', )\n\n\nclass GlobalDecl(Node):\n __slots__ = ('decls', 'coord')\n def __init__(self, decls, coord=None):\n self.decls = decls\n self.coord = coord\n\n def children(self):\n nodelist = []\n for i,decl in enumerate(self.decls or []):\n nodelist.append((\"decls[%d]\" % i, decl))\n return tuple(nodelist)\n\n def __iter__(self):\n for i in (self.decls or []):\n yield 
i\n\n attr_names = ()\n\n\nclass FuncDecl(Node):\n __slots__ = ('params', 'type', 'coord', 'gen_location')\n def __init__(self, params, type, coord=None):\n self.params = params\n self.type = type\n self.coord = coord\n self.gen_location = None\n\n def children(self):\n nodelist = []\n if self.params is not None: \n nodelist.append((\"params\", self.params))\n if self.type is not None: \n nodelist.append((\"type\", self.type))\n return tuple(nodelist)\n\n def __iter__(self):\n if self.params is not None:\n yield self.params\n if self.type is not None:\n yield self.type\n\n attr_names = ()\n\n\nclass FuncDef(Node):\n __slots__ = ('spec', 'decl', 'param_decls', 'body', 'coord','decls')\n def __init__(self, spec, decl, param_decls, body, coord=None):\n self.spec = spec\n self.decl = decl\n self.param_decls = param_decls\n self.body = body\n self.coord = coord\n self.decls = None\n\n def children(self):\n nodelist = []\n if self.spec is not None: \n nodelist.append((\"spec\", self.spec))\n if self.decl is not None: \n nodelist.append((\"decl\", self.decl))\n if self.body is not None: \n nodelist.append((\"body\", self.body))\n for i, child in enumerate(self.param_decls or []):\n nodelist.append((\"param_decls[%d]\" % i, child))\n return tuple(nodelist)\n\n def __iter__(self):\n if self.spec is not None:\n yield self.spec\n if self.decl is not None:\n yield self.decl\n if self.body is not None:\n yield self.body\n for child in (self.param_decls or []):\n yield child\n\n attr_names = ()\n\n\nclass FuncCall(Node):\n __slots__ = ('name', 'params', 'coord', 'type', 'gen_location')\n def __init__(self, name, params, coord=None):\n self.name = name\n self.params = params\n self.coord = coord\n self.type = None\n self.gen_location = None\n\n def children(self):\n nodelist = []\n if self.name is not None:\n nodelist.append((\"name\", self.name))\n if self.params is not None:\n nodelist.append((\"params\", self.params))\n return tuple(nodelist)\n\n def __iter__(self):\n if self.name is not None:\n yield self.name\n if self.params is not None:\n yield self.params\n\n attr_names = ()\n\n\nclass VarDecl(Node):\n __slots__ = ('declname', 'type', 'coord','gen_location')\n def __init__(self, declname, type, coord=None):\n self.declname = declname\n self.type = type\n self.coord = coord\n self.gen_location = None\n\n def children(self):\n nodelist = []\n if self.type is not None: \n nodelist.append((\"type\", self.type))\n return tuple(nodelist)\n\n def __iter__(self):\n if self.type is not None:\n yield self.type\n\n attr_names = ()\n\n\nclass Decl(Node):\n __slots__ = ('name', 'type', 'init', 'coord')\n def __init__(self, name, type, init, coord=None):\n self.name = name\n self.type = type\n self.init = init\n self.coord = coord\n\n def children(self):\n nodelist = []\n if self.type is not None: \n nodelist.append((\"type\", self.type))\n if self.init is not None: \n nodelist.append((\"init\", self.init))\n return tuple(nodelist)\n\n def __iter__(self):\n if self.type is not None:\n yield self.type\n if self.init is not None:\n yield self.init\n\n attr_names = ('name',)\n\n\nclass PtrDecl(Node):\n __slots__ = ('type', 'coord')\n def __init__(self, type, coord=None):\n self.type = type\n self.coord = coord\n\n def children(self):\n nodelist = []\n if self.type is not None: \n nodelist.append((\"type\", self.type))\n return tuple(nodelist)\n\n def __iter__(self):\n if self.type is not None:\n yield self.type\n\n attr_names = ()\n\n\nclass ArrayRef(Node):\n __slots__ = ('name', 'subscript', 
'coord','bind','type','model','gen_location')\n    def __init__(self, name, subscript, coord=None):\n        self.name = name\n        self.subscript = subscript\n        self.coord = coord\n        self.bind = None\n        self.type = None\n        self.model = None    \n        self.gen_location = None\n\n    def children(self):\n        nodelist = []\n        if self.name is not None: \n            nodelist.append((\"name\", self.name))\n        if self.subscript is not None: \n            nodelist.append((\"subscript\", self.subscript))\n        return tuple(nodelist)\n\n    def __iter__(self):\n        if self.name is not None:\n            yield self.name\n        if self.subscript is not None:\n            yield self.subscript\n\n    attr_names = ()\n\n\nclass ArrayDecl(Node):\n    __slots__ = ('type', 'tam', 'coord')\n    def __init__(self, type, tam, coord=None):\n        self.type = type\n        self.tam = tam\n        self.coord = coord\n\n    def children(self):\n        nodelist = []\n        if self.type is not None: \n            nodelist.append((\"type\", self.type))\n        if self.tam is not None: \n            nodelist.append((\"tam\", self.tam))\n        return tuple(nodelist)\n\n    def __iter__(self):\n        if self.type is not None:\n            yield self.type\n        if self.tam is not None:\n            yield self.tam\n\n    attr_names = ()\n\n\nclass DeclList(Node):\n    __slots__ = ('decls', 'coord')\n    def __init__(self, decls, coord=None):\n        self.decls = decls\n        self.coord = coord\n\n    def children(self):\n        nodelist = []\n        for i, child in enumerate(self.decls or []):\n            nodelist.append((\"decls[%d]\" % i, child))\n        return tuple(nodelist)\n\n    def __iter__(self):\n        for child in (self.decls or []):\n            yield child\n\n    attr_names = ()\n\n\nclass InitList(Node):\n    __slots__ = ('expression', 'coord', 'value', 'gen_location')\n    def __init__(self, expression, coord=None):\n        self.expression = expression\n        self.coord = coord\n        self.value = None\n        self.gen_location = None\n\n    def children(self):\n        nodelist = []\n        for i, child in enumerate(self.expression or []):\n            nodelist.append((\"expression[%d]\" % i, child))\n        return tuple(nodelist)\n\n    def __iter__(self):\n        for i in (self.expression or []):\n            yield i\n\n    attr_names = ()\n\n\nclass ExprList(Node):\n    __slots__ = ('expression', 'coord')\n    def __init__(self, expression, coord=None):\n        self.expression = expression\n        self.coord = coord\n\n    def children(self):\n        nodelist = []\n        for i, child in enumerate(self.expression or []):\n            nodelist.append((\"expression[%d]\" % i, child))\n        return tuple(nodelist)\n\n    def __iter__(self):\n        for i in (self.expression or []):\n            yield i\n\n    attr_names = ()\n\n\nclass ParamList(Node):\n    __slots__ = ('params', 'coord')\n    def __init__(self, params, coord=None):\n        self.params = params\n        self.coord = coord\n\n    def children(self):\n        nodelist = []\n        for i, child in enumerate(self.params or []):\n            nodelist.append((\"params[%d]\" % i, child))\n        return tuple(nodelist)\n\n    def __iter__(self):\n        for child in (self.params or []):\n            yield child\n\n    attr_names = ()\n\n\nclass UnaryOp(Node):\n    __slots__ = ('op', 'expression', 'coord', 'gen_location', 'type')\n    def __init__(self, op, expression, coord=None):\n        self.op = op\n        self.expression = expression\n        self.coord = coord\n        self.gen_location = None\n        self.type = None\n\n    def children(self):\n        nodelist = []\n        if self.expression is not None: \n            nodelist.append((\"expression\", self.expression))\n        return tuple(nodelist)\n\n    def __iter__(self):\n        if self.expression is not None:\n            yield self.expression\n\n    attr_names = ('op', )\n\n\nclass BinaryOp(Node):\n    __slots__ = ('op', 'left_val', 'right_val', 'coord','type','gen_location')\n    def __init__(self, op, left_val, right_val, coord=None):\n        self.op = op\n        self.left_val = 
left_val\n self.right_val = right_val\n self.coord = coord\n self.type = None\n self.gen_location = None\n\n def children(self):\n nodelist = []\n if self.left_val is not None:\n nodelist.append((\"left_val\", self.left_val))\n if self.right_val is not None:\n nodelist.append((\"right_val\", self.right_val))\n return tuple(nodelist)\n\n def __iter__(self):\n if self.left_val is not None:\n yield self.left_val\n if self.right_val is not None:\n yield self.right_val\n\n attr_names = ('op', )\n\n\nclass Assignment(Node):\n __slots__ = ('op', 'value1', 'value2', 'coord')\n\n def __init__(self, op, value1, value2, coord=None):\n self.op = op\n self.value1 = value1\n self.value2 = value2\n self.coord = coord\n\n def children(self):\n nodelist = []\n if self.value1 is not None: \n nodelist.append((\"value1\", self.value1))\n if self.value2 is not None: \n nodelist.append((\"value2\", self.value2))\n return tuple(nodelist)\n\n def __iter__(self):\n nodelist = []\n if self.value1 is not None:\n yield self.value1\n if self.value2 is not None:\n yield self.value2\n\n attr_names = ('op', )\n\n\nclass Compound(Node):\n __slots__ = ('block_items', 'coord')\n\n def __init__(self, block_items, coord=None):\n self.block_items = block_items\n self.coord = coord.split(\":\")[0]+\":1\"\n\n def children(self):\n nodelist = []\n for i, child in enumerate(self.block_items or []):\n nodelist.append((\"block_items[%d]\" % i, child))\n return tuple(nodelist)\n\n def __iter__(self):\n for i in (self.block_items or []):\n yield i\n\n attr_names = ()\n\n\nclass EmptyStatement(Node):\n __slots__ = (\"coord\")\n\n def __init__(self, coord=None):\n self.coord = coord\n\n def children(self):\n return ()\n\n def __iter__(self):\n return \n\n attr_names = ()\n\n\nclass Break(Node):\n __slots__ = ('coord')\n def __init__(self, coord=None):\n self.coord = coord\n\n def children(self):\n return ()\n\n def __iter__(self):\n return\n\n attr_names = ()\n\n\nclass Assert(Node):\n __slots__ = ('expression', 'coord')\n\n def __init__(self, expression, coord=None):\n self.expression = expression\n self.coord = coord\n\n def children(self):\n nodelist = []\n if self.expression is not None: \n nodelist.append((\"expression\", self.expression))\n return tuple(nodelist)\n\n def __iter__(self):\n if self.expression is not None:\n yield self.expression\n\n attr_names = ()\n\n\nclass For(Node):\n __slots__ = (\"initial\", \"cond\", \"next\", \"statement\", \"coord\",'label_exit')\n\n def __init__(self, initial, cond, next, statement, coord=None):\n self.initial = initial\n self.cond = cond\n self.next = next\n self.statement = statement\n self.coord = coord\n self.label_exit = None\n\n def children(self):\n nodelist = []\n if self.initial is not None: \n nodelist.append((\"initial\", self.initial))\n if self.cond is not None: \n nodelist.append((\"cond\", self.cond))\n if self.next is not None: \n nodelist.append((\"next\", self.next))\n if self.statement is not None: \n nodelist.append((\"statement\", self.statement))\n return tuple(nodelist)\n\n def __iter__(self):\n if self.initial is not None:\n yield self.initial \n if self.cond is not None:\n yield self.cond \n if self.next is not None:\n yield self.next \n if self.statement is not None:\n yield self.statement\n\n attr_names = ()\n\n\nclass While(Node):\n __slots__ = ('cond', 'statement', 'coord','label_exit')\n\n def __init__(self, cond, statement, coord):\n self.cond = cond\n self.statement = statement\n self.coord = coord\n self.label_exit = None\n\n def children(self):\n nodelist 
= []\n if self.cond is not None: \n nodelist.append((\"cond\", self.cond))\n if self.statement is not None: \n nodelist.append((\"statement\", self.statement))\n return tuple(nodelist)\n\n def __iter__(self):\n if self.cond is not None:\n yield self.cond\n if self.statement is not None:\n yield self.statement\n\n attr_names = ()\n\n\nclass Return(Node):\n __slots__ = ('expression', 'coord')\n\n def __init__(self, expression, coord=None):\n self.expression = expression\n self.coord = coord\n\n def children(self):\n nodelist = []\n if self.expression is not None: \n nodelist.append((\"expression\", self.expression))\n return tuple(nodelist)\n\n def __iter__(self):\n if self.expression is not None:\n yield self.expression\n\n attr_names = ()\n\n\nclass ID(Node):\n __slots__ = ('name', 'coord','type','bind','scope','gen_location','model')\n def __init__(self, name, coord=None):\n self.name = name\n self.coord = coord\n self.type = None\n self.bind = None\n self.scope = None\n self.model = None\n self.gen_location = None\n\n def children(self):\n nodelist = []\n return tuple(nodelist)\n\n def __iter__(self):\n return\n\n attr_names = ('name',)\n\n\nclass Print(Node):\n __slots__ = ('expression', 'coord')\n\n def __init__(self, expression, coord=None):\n self.expression = expression\n self.coord = coord\n\n def children(self):\n nodelist = []\n for i, child in enumerate(self.expression or []):\n if child is not None:\n nodelist.append((\"expression[%d]\" % i, child))\n return tuple(nodelist)\n\n def __iter__(self):\n for i in (self.expression or []):\n yield i\n\n attr_names = ()\n\n\nclass Read(Node):\n __slots__ = ('expression', 'coord')\n\n def __init__(self, expression, coord=None):\n self.expression = expression\n self.coord = coord\n\n def children(self):\n nodelist = []\n for i, child in enumerate(self.expression or []):\n nodelist.append((\"expression[%d]\" % i, child))\n return tuple(nodelist)\n \n def __iter__(self):\n for i in (self.expression or []):\n yield i\n\n attr_names = ()\n\n\nclass If(Node):\n __slots__ = ('cond', 'true', 'false', 'coord')\n\n def __init__(self, cond, true, false, coord=None):\n self.cond = cond\n self.true = true\n self.false = false\n self.coord = coord\n\n def children(self):\n nodelist = []\n if self.cond is not None: \n nodelist.append((\"cond\", self.cond))\n if self.true is not None: \n nodelist.append((\"true\", self.true))\n if self.false is not None: \n nodelist.append((\"false\", self.false))\n return tuple(nodelist)\n\n def __iter__(self):\n if self.cond is not None:\n yield self.cond\n if self.true is not None:\n yield self.true\n if self.false is not None:\n yield self.false\n\n attr_names = ()\n\n","sub_path":"ast.py","file_name":"ast.py","file_ext":"py","file_size_in_byte":21751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"60471108","text":"import sys\nimport csv\n\nfrom datetime import datetime\n\nimport pytz\n\nfrom django.db import models\nfrom django.db.models.functions import Greatest\nfrom django.core.management.base import BaseCommand\n\n\nfrom rdmo.projects.models import Project, Value\n\n\nclass Command(BaseCommand):\n\n columns = ('id', 'title', 'created', 'updated', 'last_changed')\n\n def add_arguments(self, parser):\n parser.add_argument('since',\n type=lambda s: pytz.utc.localize(datetime.strptime(s, '%Y-%m-%d')),\n help='Date since the projects have been inactive (format: 2022-12-31).')\n parser.add_argument('-o|--output-file', dest='output_file', default=None,\n 
help='Store the output in a csv file.')\n\n def handle(self, *args, **options):\n # prepare subquery for last_changed\n last_changed_subquery = models.Subquery(\n Value.objects.filter(project=models.OuterRef('pk')).order_by('-updated').values('updated')[:1]\n )\n\n # prepare actual query\n rows = Project.objects.annotate(last_changed=Greatest('updated', last_changed_subquery)) \\\n .filter(last_changed__lt=options['since']) \\\n .order_by('-last_changed') \\\n .values_list(*self.columns)\n\n if rows:\n fp = open(options['output_file'], 'w') if options['output_file'] else sys.stdout\n csv_writer = csv.writer(fp)\n csv_writer.writerow(self.columns)\n csv_writer.writerows(rows)\n fp.close()\n","sub_path":"rdmo/projects/management/commands/find_inactive_projects.py","file_name":"find_inactive_projects.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"74460723","text":"# Code Jam 2015\n# Qualification Round B: Pancakes\n\ndef solve(p):\n m = max(p); s = m\n for x in xrange(1,m):\n moves = 0\n for y in p:\n moves += (y-1)/x\n s = min(s,moves+x)\n return s\n\ndef pancakes():\n f = open('output.txt','w')\n with open('input.txt','r') as i:\n t = int(next(i))\n for x in xrange(t):\n n = int(next(i))\n p = map(int,next(i).split())\n f.write(\"Case #%i: %i\\n\" % (x+1,solve(p)))\n\npancakes()\n","sub_path":"CodeJam/2015/QualificationRd/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"510686758","text":"import os\nimport json\nimport time\nimport mlflow\n\nprefix = \"mlflow_tools.export\"\n\n# Databricks tags that cannot be set\ndbx_skip_tags = set([ \"mlflow.user\" ])\n\ndef create_tags(client, run, log_source_info):\n tags = run.data.tags.copy()\n for tag_key in dbx_skip_tags:\n tags.pop(tag_key, None)\n\n if not log_source_info:\n return tags\n\n uri = mlflow.tracking.get_tracking_uri()\n tags[prefix+\".tracking_uri\"] = uri\n dbx_host = os.environ.get(\"DATABRICKS_HOST\",None)\n if dbx_host is not None:\n tags[prefix+\".DATABRICKS_HOST\"] = dbx_host\n now = int(time.time()+.5)\n snow = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.gmtime(now))\n tags[prefix+\".timestamp\"] = now\n tags[prefix+\".timestamp_nice\"] = snow\n\n tags[prefix+\".run_id\"] = run.info.run_id\n tags[prefix+\".experiment_id\"] = run.info.experiment_id\n exp = client.get_experiment(run.info.experiment_id)\n tags[prefix+\".experiment_name\"] = exp.name\n tags[prefix+\".user_id\"] = run.info.user_id\n\n return tags\n\ndef get_now_nice():\n now = int(time.time()+.5)\n return time.strftime(\"%Y-%m-%d %H:%M:%S\", time.gmtime(now))\n\ndef strip_underscores(obj):\n return { k[1:]:v for (k,v) in obj.__dict__.items() }\n\ndef write_json_file(path, dct):\n with open(path, 'w') as f:\n f.write(json.dumps(dct,indent=2)+\"\\n\")\n\ndef read_json_file(path):\n with open(path, \"r\") as f:\n return json.loads(f.read())\n","sub_path":"tools/mlflow_fun/export_import/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"504112675","text":"from flask_wtf import FlaskForm\nfrom wtforms import TextField, SelectField, SelectMultipleField, DateField, IntegerField, BooleanField, DateTimeField\nfrom wtforms.validators import Required, NumberRange, Optional\n\nfrom poolscore import app\n\nclass 
TourneyForm(FlaskForm):\n __abstract__ = True\n\n date = DateField('Date', [Required(message = 'Enter tourney date')])\n ruleset = TextField('Ruleset', [Required(message = 'Ruleset required')], default=\"APA8BALL\")\n scoring_method = TextField('Scoring Method', [Required(message = 'Scoring method required')], default=\"APA8BALL\")\n data = TextField('Data')\n owner_id = SelectField('Owner',[Required(message = 'Select Tourney Owner.')], coerce=int, default = 1)\n\nclass TourneyAddForm(TourneyForm):\n home_team_id = SelectField('Home Team', [NumberRange(min=1,message = 'Select Home Team.')], coerce=int)\n away_team_id = SelectField('Away Team', [NumberRange(min=1,message = 'Select Away Team.')], coerce=int)\n\nclass TourneyEditForm(TourneyForm):\n home_score = IntegerField('Home Score', [Optional(strip_whitespace = True)])\n away_score = IntegerField('Away Score', [Optional(strip_whitespace = True)])\n winner_id = SelectField('Winner', coerce=int)\n active = BooleanField('Active')\n events = TextField('Events')\n\nclass MatchForm(FlaskForm):\n __abstract__ = True\n\n events = TextField('Events')\n data = TextField('Data')\n owner_id = SelectField('Owner',[Required(message = 'Select Match Owner.')], coerce=int, default = 1)\n\nclass MatchAddForm(MatchForm):\n home_players = SelectMultipleField('Home Players', [Required(message = 'Select Home Players.')], coerce=int)\n away_players = SelectMultipleField('Away Players', [Required(message = 'Select Away Players.')], coerce=int)\n\nclass MatchEditForm(MatchForm):\n home_score = IntegerField('Home Score', [Optional(strip_whitespace = True)])\n away_score = IntegerField('Home Score', [Optional(strip_whitespace = True)])\n winner_id = SelectField('Winner', coerce=int)\n active = BooleanField('Active')\n\nclass GameForm(FlaskForm):\n winner_id = SelectField('Winner', coerce=int)\n events = TextField('Events')\n data = TextField('Data')\n active = BooleanField('Active', default = True)\n owner_id = SelectField('Owner',[Required(message = 'Select Game Owner.')], coerce=int, default = 1)\n","sub_path":"poolscore/mod_admin/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"117977006","text":"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom RoverFSMStates import *\nimport socket\nimport struct\n\nclass RoverAI :\n\n def __init__( self, agent ) :\n\n self.agent = agent\n self.m_roverData = None\n\n def update( self, dt, roverData ) :\n self.m_roverData = roverData\n\n\nclass RoverAI_FSM( RoverAI ) :\n\n ST_LOOKING_FOR_PATH = 'lookingForPath'\n ST_FORWARD = 'forward'\n ST_BRAKING = 'braking'\n ST_REACHING_ROCK = 'reachingRock'\n ST_PICKING_ROCK = 'pickingRock'\n ST_TEST = 'test'\n\n def __init__( self, agent ) :\n super( RoverAI_FSM, self ).__init__( agent )\n self.m_states = {}\n self.m_states[RoverAI_FSM.ST_LOOKING_FOR_PATH] = STLookingForPath( self )\n self.m_states[RoverAI_FSM.ST_FORWARD] = STForward( self )\n self.m_states[RoverAI_FSM.ST_BRAKING] = STBraking( self )\n self.m_states[RoverAI_FSM.ST_REACHING_ROCK] = STReachingRock( self )\n self.m_states[RoverAI_FSM.ST_PICKING_ROCK] = STPickingRock( self )\n self.m_states[RoverAI_FSM.ST_TEST] = STTest( self )\n \n self.m_currentState = None\n self.m_currentStateId = ''\n\n\n self.setCurrentState( RoverAI_FSM.ST_LOOKING_FOR_PATH )\n\n def setCurrentState( self, stateId ) :\n if ( self.m_currentState != None ) :\n self.m_currentState.onExit()\n\n self.m_currentState = 
self.m_states[stateId]\n        self.m_currentStateId = stateId\n        self.m_currentState.onEnter()\n\n    def update( self, dt, roverData ) :\n        super( RoverAI_FSM, self ).update( dt, roverData )\n        ## print( 'RoverAI_FSM::update> ', dt, ' currentState: ', self.m_currentStateId )\n\n        if ( self.m_currentState != None ) :\n            self.m_currentState.update( dt, roverData )\n            if ( self.m_currentState.state == RoverFSMState.ST_FINISHED ) :\n                \n                # go to next state\n                if self.m_currentStateId == RoverAI_FSM.ST_LOOKING_FOR_PATH :\n                    if self.m_currentState.status == 'found_navigable_area' :\n                        self.setCurrentState( RoverAI_FSM.ST_FORWARD )\n                    else :\n                        self.setCurrentState( RoverAI_FSM.ST_LOOKING_FOR_PATH )\n\n                elif self.m_currentStateId == RoverAI_FSM.ST_FORWARD :\n                    if self.m_currentState.status == 'no_navigable_area' :\n                        self.setCurrentState( RoverAI_FSM.ST_BRAKING )\n                    elif self.m_currentState.status == 'rock_in_area' :\n                        self.setCurrentState( RoverAI_FSM.ST_REACHING_ROCK )\n\n                elif self.m_currentStateId == RoverAI_FSM.ST_BRAKING :\n                    if self.m_currentState.status == 'fully_stopped' :\n                        self.setCurrentState( RoverAI_FSM.ST_LOOKING_FOR_PATH )\n\n                elif self.m_currentStateId == RoverAI_FSM.ST_REACHING_ROCK :\n                    if self.m_currentState.status == 'rock_reachable' :\n                        self.setCurrentState( RoverAI_FSM.ST_PICKING_ROCK )\n                    elif self.m_currentState.status == 'rock_out_of_range' :\n                        self.setCurrentState( RoverAI_FSM.ST_LOOKING_FOR_PATH )\n\n                elif self.m_currentStateId == RoverAI_FSM.ST_PICKING_ROCK :\n                    if self.m_currentState.status == 'rock_picked' :\n                        self.setCurrentState( RoverAI_FSM.ST_LOOKING_FOR_PATH )\n\n\nclass RoverAI_BT( RoverAI ) :\n\n    def __init__( self, agent ) :\n        super( RoverAI_BT, self ).__init__( agent )\n\n\nclass PIDController :\n\n    def __init__( self, Kp = 5.0, Kd = 4.0, Ki = 0.001 ) :\n\n        self.epv = 0.0\n        self.eiv = 0.0\n        self.edv = 0.0\n\n        self.Kp = Kp\n        self.Kd = Kd\n        self.Ki = Ki\n\n    def reset( self ) :\n        self.epv = 0.0\n        self.eiv = 0.0\n        self.edv = 0.0\n\n    def calculate( self, x, xRef, verbose = False ) :\n        _epv = x - xRef\n        self.edv = _epv - self.epv\n        self.epv = _epv\n        self.eiv += _epv\n        _u = -( self.Kp * self.epv + self.Kd * self.edv + self.Ki * self.eiv )\n        if ( verbose ) :\n            print( 'x,xRef: ', x, xRef, ' u: ', _u )\n\n        return _u\n\nclass RoverMotionController :\n\n    def __init__( self ) :\n\n        self.m_speedController = PIDController()\n        self.m_steerController = PIDController( 15.0, 15.0, 0.0 )\n        self.ai = RoverAI_FSM( self )\n\n    def update( self, dt, roverData ) :\n        self.ai.update( dt, roverData )\n\n    def restartNavigationController( self ) :\n        self.m_speedController.reset()\n\n    def restartSteerController( self ):\n        self.m_steerController.reset()\n\n    def navigationController( self, v, theta, vRef, thetaRef ) :\n        u_throttle = self.m_speedController.calculate( v, vRef, False )\n        u_brake = 0\n        if u_throttle < 0 :\n            u_brake = np.clip( -u_throttle ,0, 10 )\n\n        u_throttle = np.clip( u_throttle, 0, 0.2 )\n        u_steer = np.clip( thetaRef, -15, 15 )\n\n        return [u_throttle,u_brake,u_steer]\n\n    def steerController( self, theta, thetaRef ) :\n        u_steer = self.m_steerController.calculate( theta, thetaRef, True )\n        u_steer = np.clip( u_steer, -15, 15 )\n\n        return [0,0,u_steer]\n\n    def positionController( self, xRef, yRef ) :\n        u_throttle = 0\n        u_brake = 0\n        u_steer = 0\n        return [u_throttle,u_brake,u_steer]\n\ng_roverController = RoverMotionController()\n\n\"\"\"\ng_socket = socket.socket()\ng_host = socket.gethostname()\ng_port = 4571\ng_status = 'idle'\ng_socket.connect( ( g_host, g_port ) )\n\"\"\"\n\ndef onBroadcast( dataPacket ) :\n    
global g_socket, g_host, g_port, g_status\n print( 'broadcasting' )\n g_socket.send( dataPacket )\n \n \n\nTYPE_STR = type('')\nTYPE_INT = type(0)\nTYPE_FLOAT = type(.0)\n\ndef encode( packet ) :\n _res = b''\n for q in range( len( packet ) ) :\n if type( packet[q] ) == TYPE_STR :\n _res += packet[q].encode( 'utf-8' )\n elif type( packet[q] ) == TYPE_INT :\n print( 'encoding ', packet[q], ' floats' )\n _res += chr( packet[q] ).encode( 'utf-8' )\n elif type( packet[q] ) == TYPE_FLOAT :\n _res += struct.pack( 'f', packet[q] )\n return _res\n\ndef encodeData() :\n global g_roverState\n _packet = ['s']\n\n if g_roverState == None:\n _encoded_str += 'xxx'\n return _encoded_str\n\n _packet.append( 'o' )\n _packet.append( 'k' )\n\n if g_roverState.navigationPath :\n encodeNavPathData( g_roverState.navigationPath, _packet )\n if g_roverState.navigationMesh :\n encodeNavMeshData( g_roverState.navigationMesh, _packet )\n\n return encode( _packet )\n\ndef encodeNavPathData( navPath, workingpacket ) :\n workingpacket.append( 'n' )\n _pts = navPath.getPoints()\n _numPoints = len( _pts )\n workingpacket.append( _numPoints * 2 )\n for q in range( _numPoints ) :\n workingpacket.append( _pts[q][0] )\n workingpacket.append( _pts[q][1] )\n\ndef encodeNavMeshData( navMesh, workingpacket ) :\n pass\n\ng_roverState = None\n\n# This is where you can build a decision tree for determining throttle, brake and steer \n# commands based on the output of the perception_step() function\ndef decision_step(Rover):\n global g_roverController, g_socket, g_roverState\n\n g_roverState = Rover\n\n ### _dataPacket = encodeData()\n ### onBroadcast( _dataPacket )\n\n # Implement conditionals to decide what to do given perception data\n # Here you're all set up with some basic functionality but you'll need to\n # improve on this decision tree to do a good job of navigating autonomously!\n g_roverController.update( Rover.time_struct['delta'], Rover )\n\n \"\"\"\n # Example:\n # Check if we have vision data to make decisions with\n if Rover.nav_angles is not None:\n # Check for Rover.mode status\n if Rover.mode == 'forward': \n # Check the extent of navigable terrain\n if len(Rover.nav_angles) >= Rover.stop_forward: \n # If mode is forward, navigable terrain looks good \n # and velocity is below max, then throttle \n if Rover.vel < Rover.max_vel:\n # Set throttle value to throttle setting\n Rover.throttle = Rover.throttle_set\n else: # Else coast\n Rover.throttle = 0\n Rover.brake = 0\n # Set steering to average angle clipped to the range +/- 15\n Rover.steer = np.clip(np.mean(Rover.nav_angles * 180/np.pi), -15, 15)\n # If there's a lack of navigable terrain pixels then go to 'stop' mode\n elif len(Rover.nav_angles) < Rover.stop_forward:\n # Set mode to \"stop\" and hit the brakes!\n Rover.throttle = 0\n # Set brake to stored brake value\n Rover.brake = Rover.brake_set\n Rover.steer = 0\n Rover.mode = 'stop'\n\n # If we're already in \"stop\" mode then make different decisions\n elif Rover.mode == 'stop':\n # If we're in stop mode but still moving keep braking\n if Rover.vel > 0.2:\n Rover.throttle = 0\n Rover.brake = Rover.brake_set\n Rover.steer = 0\n # If we're not moving (vel < 0.2) then do something else\n elif Rover.vel <= 0.2:\n # Now we're stopped and we have vision data to see if there's a path forward\n if len(Rover.nav_angles) < Rover.go_forward:\n Rover.throttle = 0\n # Release the brake to allow turning\n Rover.brake = 0\n # Turn range is +/- 15 degrees, when stopped the next line will induce 4-wheel turning\n 
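# (A possible refinement, not implemented here: pick the turn\n                    # direction from the sign of np.mean(Rover.nav_angles)\n                    # instead of always steering -15.)\n                    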
Rover.steer = -15 # Could be more clever here about which way to turn\n # If we're stopped but see sufficient navigable terrain in front then go!\n if len(Rover.nav_angles) >= Rover.go_forward:\n # Set throttle back to stored value\n Rover.throttle = Rover.throttle_set\n # Release the brake\n Rover.brake = 0\n # Set steer to mean angle\n Rover.steer = np.clip(np.mean(Rover.nav_angles * 180/np.pi), -15, 15)\n Rover.mode = 'forward'\n # Just to make the rover do something \n # even if no modifications have been made to the code\n else:\n Rover.throttle = Rover.throttle_set\n Rover.steer = 0\n Rover.brake = 0\n \"\"\"\n # If in a state where want to pickup a rock send pickup command\n if Rover.near_sample and Rover.vel == 0 and not Rover.picking_up:\n Rover.send_pickup = True\n \n return Rover\n\n","sub_path":"code/decision.py","file_name":"decision.py","file_ext":"py","file_size_in_byte":10869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"615120851","text":"import numpy as np\nimport pickle, pandas, textblob, string, sys\nfrom sklearn import model_selection, preprocessing, linear_model, naive_bayes, metrics, svm\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nfrom sklearn import decomposition, ensemble\n\n\ndef get_clean_data():\n \"\"\"\n read the clean data, which was already preprossed and return some clean data\n\n return: a tuple of clean data (text, train_text, val_text, test_text, y_train_encode, y_val_encode, y_test_encode)\n \"\"\"\n try:\n with open('../../data/word_dic.p', 'rb') as f:\n u = pickle._Unpickler(f)\n u.encoding = 'latin1'\n X_train, X_val, X_test, train_text, val_text, test_text, y_train, y_val, y_test, wordtoix, ixtoword = u.load()\n except:\n print('cannot read the clean data')\n exit(1)\n\n text = train_text + val_text + test_text\n num_train, num_val, num_test = len(X_train), len(X_val), len(X_test) \n y_train_encode, y_val_encode, y_test_encode = [], [], []\n\n for i in range(num_train):\n for k in range(4):\n if y_train[i][k] == 1:\n y_train_encode.append(k)\n break\n\n for i in range(num_val):\n for k in range(4):\n if y_val[i][k] == 1:\n y_val_encode.append(k)\n break\n\n for i in range(num_test):\n for k in range(4):\n if y_test[i][k] == 1:\n y_test_encode.append(k)\n break\n\n return text, train_text, val_text, test_text, y_train_encode, y_val_encode, y_test_encode\n\n\ndef generate_word_count(clean_data):\n \"\"\"\n gerenerate the word count feature\n \n @param: text: type, a tuple, a tuple of clean data (text, train_text, val_text, test_text, y_train_encode, y_val_encode, y_test_encode)\n return: void\n \"\"\"\n text, train_text, val_text, test_text, y_train_encode, y_val_encode, y_test_encode = clean_data\n\n count_vec = CountVectorizer(token_pattern=r'w{1,}')\n count_vec.fit(text)\n\n X_train_count = count_vec.transform(train_text)\n X_val_count = count_vec.transform(val_text)\n X_test_count = count_vec.transform(test_text)\n\n pickle.dump([X_train_count, X_val_count, X_test_count, y_train_encode, y_val_encode, y_test_encode], open(\"../../data/word_count.p\", \"wb\"))\n\n\ndef generate_TFIDF(clean_data):\n \"\"\"\n gerenerate the word level TF-IDF feature\n \n @param: text: type, a tuple, a tuple of clean data (text, train_text, val_text, test_text, y_train_encode, y_val_encode, y_test_encode)\n return: void\n \"\"\"\n text, train_text, val_text, test_text, y_train_encode, y_val_encode, y_test_encode = clean_data\n\n # word level tf-idf\n tfidf_vec = 
TfidfVectorizer(analyzer='word', token_pattern=r'\\w{1,}', max_features=5000)\n    tfidf_vec.fit(text)\n\n    X_train_tfidf = tfidf_vec.transform(train_text)\n    X_val_tfidf = tfidf_vec.transform(val_text)\n    X_test_tfidf = tfidf_vec.transform(test_text)\n\n    pickle.dump([X_train_tfidf, X_val_tfidf, X_test_tfidf, y_train_encode, y_val_encode, y_test_encode], open(\"../../data/tfidf.p\", \"wb\"))\n\n\ndef generate_ngram(clean_data):\n    \"\"\"\n    generate the 2-gram and 3-gram features\n\n    @param: clean_data: type, a tuple, a tuple of clean data (text, train_text, val_text, test_text, y_train_encode, y_val_encode, y_test_encode)\n    return: void\n    \"\"\"\n    text, train_text, val_text, test_text, y_train_encode, y_val_encode, y_test_encode = clean_data\n\n    # ngram level tf-idf\n    tfidf_vec_ngram = TfidfVectorizer(analyzer='word', token_pattern=r'\\w{1,}', ngram_range=(2,3), max_features=5000)\n    tfidf_vec_ngram.fit(text)\n\n    X_train_tfidf_ngram = tfidf_vec_ngram.transform(train_text)\n    X_val_tfidf_ngram = tfidf_vec_ngram.transform(val_text)\n    X_test_tfidf_ngram = tfidf_vec_ngram.transform(test_text)\n\n    pickle.dump([X_train_tfidf_ngram, X_val_tfidf_ngram, X_test_tfidf_ngram, y_train_encode, y_val_encode, y_test_encode], open(\"../../data/tfidf_ngram.p\", \"wb\"))\n\n\ndef main():\n    clean_data = get_clean_data()\n    generate_word_count(clean_data)\n    generate_TFIDF(clean_data)\n    generate_ngram(clean_data)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"MH-Term-Project-master/src/features/feature_extraction.py","file_name":"feature_extraction.py","file_ext":"py","file_size_in_byte":4175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"100536251","text":"from collections import deque\n\ndx=[-1,0,1,0]\ndy=[0,1,0,-1]\n\nn, m=map(int, input().split())\nboard=[list(map(int, input().split())) for _ in range(m)]\n\nQ=deque()\ndis=[[0]*n for _ in range(m)]\n\nfor i in range(m):\n    for j in range(n):\n        if board[i][j]==1:\n            Q.append((i, j))\nwhile Q:\n    tmp=Q.popleft()\n    for k in range(4):\n        x=tmp[0]+dx[k]\n        y=tmp[1]+dy[k]\n        if 0<=x<m and 0<=y<n and board[x][y]==0:\n            board[x][y]=1\n            dis[x][y]=dis[tmp[0]][tmp[1]]+1\n            Q.append((x, y))\n\n# any unreached 0 means failure; otherwise the answer is the largest distance\nflag=1\nresult=0\nfor i in range(m):\n    for j in range(n):\n        if board[i][j]==0:\n            flag=0\n        if dis[i][j]>result:\n            result=dis[i][j]\nif flag==1:\n    print(result)\nelse:\n    print(-1) ","sub_path":"inflearn/BFS/토마토.py","file_name":"토마토.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"3070863","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef successive_poisson(tau1, tau2, size=1):\n    \"\"\"Compute time for arrival of 2 successive Poisson processes.\"\"\"\n    # tau1, tau2 waiting times for no-hitter and hit-the-cycle baseball events\n    # Draw samples out of first exponential distribution: t1\n    t1 = np.random.exponential(tau1, size)\n\n    # Draw samples out of second exponential distribution: t2\n    t2 = np.random.exponential(tau2, size)\n\n    return t1 + t2\n\n# Draw samples of waiting times: waiting_times\n# waiting time for no-hitter, tau1 = 764 games\n# waiting time for hit-the-cycle, tau2 = 715 games\nwaiting_times = successive_poisson(764, 715, size=100000)\n\n# Make the histogram\nplt.hist(waiting_times, bins=100, density=True, histtype='step')\n\n# Label axes\nplt.xlabel('waiting times')\nplt.ylabel('probability')\n\n# Show the plot\nplt.show()\n","sub_path":"datacamp/statistical-thinking-python/01_04_baseball_exponential.py","file_name":"01_04_baseball_exponential.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"588374887","text":"\"\"\"\nDifferent randomization options for selective sampler.\n\nMain method 
used in selective sampler is the gradient method which\nshould be a gradient of the negative of the log-density. For a \nGaussian density, this will be a convex function, not a concave function.\n\"\"\"\n\nimport numpy as np\nimport regreg.api as rr\nfrom scipy.stats import laplace, norm as ndist\n\nclass randomization(rr.smooth_atom):\n\n def __init__(self, shape, density, grad_negative_log_density, sampler):\n rr.smooth_atom.__init__(self,\n shape)\n self._density = density\n self._grad_negative_log_density = grad_negative_log_density\n self._sampler = sampler\n\n def smooth_objective(self, perturbation, mode='both', check_feasibility=False):\n \"\"\"\n Compute the negative log-density and its gradient.\n \"\"\"\n if mode == 'func':\n return self.scale(-np.log(self._density(perturbation)))\n elif mode == 'grad':\n return self.scale(self._grad_negative_log_density(perturbation))\n elif mode == 'both':\n return self.scale(-np.log(self._density(perturbation))), self.scale(self._grad_negative_log_density(perturbation))\n else:\n raise ValueError(\"mode incorrectly specified\")\n\n def sample(self, size=()):\n return self._sampler(size=size)\n\n def gradient(self, perturbation):\n \"\"\"\n Evaluate the gradient of the log-density.\n\n Parameters\n ----------\n\n perturbation : np.float\n\n Returns\n -------\n\n gradient : np.float\n \"\"\"\n return self.smooth_objective(perturbation, mode='grad')\n\n @staticmethod\n def isotropic_gaussian(shape, scale):\n rv = ndist(scale=scale, loc=0.)\n density = lambda x: rv.pdf(x)\n grad_negative_log_density = lambda x: x / scale**2\n sampler = lambda size: rv.rvs(size=shape + size)\n return randomization(shape, density, grad_negative_log_density, sampler)\n\n @staticmethod\n def gaussian(covariance):\n precision = np.linalg.inv(covariance)\n sqrt_precision = np.linalg.cholesky(precision)\n _det = np.linalg.det(covariance)\n p = covariance.shape[0]\n _const = np.sqrt((2*np.pi)**p * _det)\n density = lambda x: np.exp(-(x * precision.dot(x)).sum() / 2) / _const\n grad_negative_log_density = lambda x: precision.dot(x)\n sampler = lambda size: sqrt_precision.dot(np.random.standard_normal((p,) + size))\n return randomization((p,), density, grad_negative_log_density, sampler)\n\n @staticmethod\n def laplace(shape, scale):\n rv = laplace(scale=scale, loc=0.)\n density = lambda x: rv.pdf(x)\n grad_negative_log_density = lambda x: np.sign(x) / scale\n sampler = lambda size: rv.rvs(size=shape + size)\n return randomization(shape, density, grad_negative_log_density, sampler)\n\n @staticmethod\n def logistic(shape, scale):\n # from http://docs.scipy.org/doc/numpy/reference/generated/numpy.random.logistic.html\n density = lambda x: (np.exp(-x / scale) / (1 + np.exp(-x / scale))**2) / scale\n # negative log density is (with \\mu=0)\n # x/s + log(s) + 2 \\log (1 + e(-x/s))\n grad_negative_log_density = lambda x: (1 - np.exp(-x / scale)) / ((1 + np.exp(-x / scale)) * scale)\n sampler = lambda size: np.random.logistic(loc=0, scale=scale, size=shape + size)\n return randomization(shape, density, grad_negative_log_density, sampler)\n","sub_path":"selection/randomized/randomization.py","file_name":"randomization.py","file_ext":"py","file_size_in_byte":3518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"53776317","text":"\"\"\"\n@author: lcantunes\n\"\"\"\nfrom tkinter import *\nfrom random import randint\nimport turtle\n\n#------------------------------Drawings----------------------------------------\n\ndef 
drawing1():\n t.reset()\n t.forward(100)\n t.right(90)\n t.forward(100)\n t.right(90)\n t.forward(100)\n t.right(90)\n t.forward(100)\n \ndef drawing2():\n global stop_key\n t.reset()\n def draw_branch(len):\n if(len > 5):\n t.speed(200)\n t.color(\"brown\")\n t.forward(len)\n t.right(25 )\n draw_branch(len - randint(4,10))\n t.left(50)\n draw_branch(len - randint(4,10))\n t.right(25)\n t.color(\"brown\")\n t.backward(len)\n else:\n t.speed(200)\n t.color(\"green\", \"green\")\n t.begin_fill()\n t.circle(10+ randint(0,5))\n t.end_fill() \n def draw_tree(start_len):\n t.reset()\n t.pendown()\n t.setheading(90)\n t.color(\"brown\")\n t.pensize(3)\n draw_branch(start_len)\n t.speed(200)\n t.penup()\n t.goto(-100, -200)\n draw_tree(randint(35, 50))\n\n#------------------------------------------------------------------------------\nversion = 1.0\n\nwelcome = \"\"\"Hello and welcome to Draw My Thing (version {0}) This is a round\nbased game that consists of one player drawing and the other players\nguessing. Due to my inability to create proper code, this version of the game\nis slightly different. This version is singleplayer and consists of the player\nguessing a randomly generated drawing.\n\"\"\".format(version)\n\nimage_dictionary = {\"drawing1\":drawing1,\"drawing2\":drawing2}\nindex_dictionary = {\"drawing1\":0,\"drawing2\":1}\nname_list = [\"\\n - Drawing1\",\"\\n - Drawing2\"]\nguess = \"CoMpLiC4T3D\"\nguess_list = [\"square\",\"tree\"]\ncounter = 0\npoints = 0\n\ntext1 = \"\"\"Welcome to Draw My Thing.\\n\\nHere is a list of available drawings to\nchoose from:{0}{1}\\n\nTo choose a drawing simply type it's name into the text box.\\n\\n\"\"\".format(name_list[0],name_list[1])\n\ntext2 = \"\"\"Your choice was recorded, you have 3 guesses. If you guess right, 1\npoint will be awarded.\n\"\"\"\n\ntext4 = \"\"\"You ran out of guesses.\\nGame OVER.\n\"\"\"\n#------------------------------------------------------------------------------\n\nwindow = Tk()\nwindow.title(\"Draw My Python\")\n\nwindow.withdraw()\n\nawindow = Toplevel(window)\n#------------------------------------------------------------------------------\n\ndef change_window():\n #remove the other window entirely\n# awindow.withdraw()\n\n #make root visible again\n window.iconify()\n window.deiconify()\n \ndef change_window2():\n #remove the other window entirely\n window.withdraw()\n\n #make root visible again\n awindow.iconify()\n awindow.deiconify()\n \n#------------------------------------------------------------------------------\nside = Frame(window)\nside.pack(side=LEFT)\n\ncanvas_frame = Frame(window)\n#canvas_frame.configure(borderwidth=1.5,background='black')\n\ncanvas = Canvas(window, width = 500, height = 500)\nt = turtle.RawTurtle(canvas)\ncanvas.pack(side=RIGHT, fill=BOTH)\n\n#t.pencolor(\"#ff0000\") \n \nmessages = Text(side,font=(\"Arial\"))\nmessages.insert(INSERT,text1)\n\nback_button = Button(side, text = \"Back\", font =(\"Arial\",25),command=change_window2).pack(side=TOP, anchor=NW)\n\nmessages.pack(fill=BOTH)\n\ninput_user = StringVar()\ninput_field = Entry(side, text=input_user)\ninput_field.pack(side=BOTTOM,fill=BOTH)\n\ndef Enter_pressed(event):\n global guess\n global counter\n global points\n global name_list\n input_get = input_field.get()\n input_field.delete(0, END)\n print(input_get)\n value_lower = input_get.lower()\n print(value_lower,\"t\")\n if value_lower in image_dictionary.keys():\n guess_key = list(index_dictionary.keys()).index(value_lower)\n print(guess_key)\n guess = 
guess_list[guess_key]\n        print(guess)\n        counter = 4\n        messages.delete(1.0, END)\n        messages.insert(INSERT,text2)\n        temp = image_dictionary[value_lower]\n        del image_dictionary[value_lower]\n        result = ''\n        for c in name_list[guess_key]:\n            if c.isalpha():\n                result = result + c + '\\u0336'\n            elif c in [\"0\",\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\"]:\n                result = result + c + '\\u0336'\n            else:\n                result = result + c\n        name_list[guess_key] = result\n        temp()\n    else:\n        if value_lower == guess:\n            t.speed(0)\n            points += 1\n            text98 = \"\"\"\\nHere is a list of available drawings to\nchoose from:{0}{1}\\n\nTo choose a drawing simply type it's name into the text box.\n(Previous drawings are not available.)\\n\\n\"\"\".format(name_list[0],name_list[1])\n            text99 = \"\"\"You guessed it. You have been awarded 1 point. You have {0} points\nso far this session, keep going.\\n\"\"\".format(points)\n            text3 = text99 + text98\n            messages.delete(1.0, END)\n            messages.insert(INSERT,text3)\n            counter = 0\n            guess = \"CoMpLiC4T3D\"\n        elif counter == 2:\n            messages.delete(1.0, END)\n            messages.insert(INSERT,text4)\n            counter = 0\n            guess = \"CoMpLiC4T3D\"\n        else:\n            counter -= 1\n    messages.insert(INSERT, '%s\\n' % input_get)\n    input_user.set('')\n    return \"break\"\n\ninput_field.bind(\"<Return>\", Enter_pressed)\n\n#------------------------------------------------------------------------------\n\nwelcome_label = Label(awindow, text = welcome, font=(\"Arial\",20)) #main text\nwelcome_label.grid(column=0,row=0)\n\nplay_bt = Button(awindow, text = \"Play\", font =(\"Arial\",25),command=change_window) #play button\nplay_bt.grid(column=0,row=1)\n\n#scoreboard_bt = Button(awindow, text = \"Scoreboard\", font =(\"Arial\",25)) #scoreboard button\n#scoreboard_bt.grid(column=0,row=2)\n#\n#about_bt = Button(awindow, text = \"About\", font =(\"Arial\",25)) #about button\n#about_bt.grid(column=0,row=3)\n\nwindow.mainloop()\n","sub_path":"DrawMyThing.py","file_name":"DrawMyThing.py","file_ext":"py","file_size_in_byte":6037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"632275004","text":"# CMPT 145 - Algorithms\n# The Maximum Slice Problem\n# Given a list A containing (positive and negative) numbers,\n# Find the slice A[a:b] that has the maximum sum\n\nimport random as rand\nimport time as time\n\n\n# version 0: naively sums each slice\ndef maxslice_brute_force_v0(alist):\n    \"\"\"\n    Find the maximum sum of all slices of alist.\n    :param alist: a list of numbers\n    :return: a number, the maximum slice sum\n    \"\"\"\n    # using brute force: look at all possible slices\n    maxsum = alist[0]\n    for i in range(len(alist)):\n        for j in range(i, len(alist)):\n            slice = sum(alist[i:j + 1])\n            if slice > maxsum:\n                maxsum = slice\n    return maxsum\n\n\n# version 1: sums the slices more cleverly\ndef maxslice_brute_force_v1(alist):\n    \"\"\"\n    Find the maximum sum of all slices of alist.\n    :param alist: a list of numbers\n    :return: a number, the maximum slice sum\n    \"\"\"\n    # using brute force: look at all possible slices\n    # but store all the partial sums in a dictionary\n    # where s[i,j] stores the value sum(alist[i,j+1])\n    s = {}\n    maxsum = alist[0]\n    for i in range(len(alist)):\n        s[i,i] = alist[i]\n        if s[i,i] > maxsum: maxsum = s[i,i]\n        for j in range(i+1, len(alist)):\n            s[i, j] = s[i,j-1] + alist[j]\n            if s[i,j] > maxsum: maxsum = s[i,j]\n    return maxsum\n\n\n# version 2 uses divide and conquer\n# Divide the list into 2 halves\n# The maximum slice can occur in one of 3 ways:\n# 1. 
Starts and finishes on the left half\n# 2. Starts and finishes on the right half\n# 3. Starts somewhere in the left half, and\n# continues somewhere to the right half\ndef maxslice_DC(alist):\n \"\"\"\n Find the maximum sum of all slices of alist.\n :param alist: a list of numbers\n :return: a number, the maximum slice sum\n \"\"\"\n # using divide and conquer\n\n def max_tail(left, right):\n \"\"\"\n Calculate the maximum slice that ends at right\n (from any point starting at left or later)\n \"\"\"\n s = {}\n s[right] = alist[right]\n maxsum = s[right]\n # calculate the sums from right to left (backwards)\n for i in range(right - 1, left - 1, -1):\n s[i] = s[i + 1] + alist[i]\n if s[i] > maxsum: maxsum = s[i]\n return maxsum\n\n def max_head(left, right):\n \"\"\"\n Calculate the maximum slice that starts at left\n (to any point up to and including right)\n \"\"\"\n s = {}\n s[left] = alist[left]\n maxsum = s[left]\n for i in range(left + 1, right + 1):\n s[i] = s[i - 1] + alist[i]\n if s[i] > maxsum: maxsum = s[i]\n return maxsum\n\n def maxslice_rec(left, right):\n \"\"\"\n Recursively find the maximum slice between left and right.\n \"\"\"\n # using divide and conquer\n if left == right:\n return alist[left]\n else:\n # divide, and solve\n mid = (right + left) // 2\n max_left = maxslice_rec(left, mid)\n max_right = maxslice_rec(mid + 1, right)\n max_cross = max_tail(left, mid) + max_head(mid + 1, right)\n # conquer\n return max(max_left, max_right, max_cross)\n\n # body of maxslice_DC\n return maxslice_rec(0, len(alist) - 1)\n\n\n# put the versions through their paces\nif __name__ == '__main__':\n examples = [[1, 2, 3, 4, 5],\n [5, 4, -3, 2, 1],\n [1, 2, -3, 4, 5],\n [1, -2, 3, 4, -5],\n [1, -2, 3, 4, -5, 1, 2, 3, 4, 5, -6, -7, -8],\n [rand.randint(-100, 100) for i in range(1000)],\n [rand.randint(-100, 100) for i in range(2000)]\n ]\n\n for ex in examples:\n print('Example: list of length:', len(ex))\n\n print('Brute Force version 0:')\n start = time.process_time()\n result = maxslice_brute_force_v0(ex)\n end = time.process_time()\n print('Result:', result, 'Time:', (end - start))\n\n print('Brute Force version 1:')\n start = time.process_time()\n result = maxslice_brute_force_v1(ex)\n end = time.process_time()\n print('Result:', result, 'Time:', (end - start))\n\n print('Divide and conquer:')\n start = time.process_time()\n result = maxslice_DC(ex)\n end = time.process_time()\n print('Result:', result, 'Time:', (end - start))\n\n print()","sub_path":"examples/ch19/max-slice.py","file_name":"max-slice.py","file_ext":"py","file_size_in_byte":4429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"218274749","text":"#! 
/usr/bin/env python\n\"\"\"\nCluster and crossmatch tools and analysis functions.\n\nIncludes:\n- DBSCAN clustering\n\"\"\"\n\nfrom __future__ import print_function\n\n__author__= \"Paul Hancock\"\n\nimport numpy as np\nimport math\n\nfrom .angle_tools import gcd, bear\nfrom .catalogs import load_table, table_to_source_list\n\n# join the Aegean logger\nimport logging\nlog = logging.getLogger('Aegean')\n\ncc2fwhm = (2 * math.sqrt(2 * math.log(2)))\nfwhm2cc = 1/cc2fwhm\n\n\ndef norm_dist(src1, src2):\n \"\"\"\n Calculate the normalised distance between two sources.\n Sources are elliptical Gaussians.\n\n The normalised distance is calculated as the GCD distance between the centers,\n divided by quadrature sum of the radius of each ellipse along a line joining the two ellipses.\n\n For ellipses that touch at a single point, the normalized distance will be 1/sqrt(2).\n\n Parameters\n ----------\n src1, src2 : object\n The two positions to compare. Objects must have the following parameters: (ra, dec, a, b, pa).\n\n Returns\n -------\n dist: float\n The normalised distance.\n\n \"\"\"\n if np.all(src1 == src2):\n return 0\n dist = gcd(src1.ra, src1.dec, src2.ra, src2.dec) # degrees\n\n # the angle between the ellipse centers\n phi = bear(src1.ra, src1.dec, src2.ra, src2.dec) # Degrees\n # Calculate the radius of each ellipse along a line that joins their centers.\n r1 = src1.a*src1.b / np.hypot(src1.a * np.sin(np.radians(phi - src1.pa)),\n src1.b * np.cos(np.radians(phi - src1.pa)))\n r2 = src2.a*src2.b / np.hypot(src2.a * np.sin(np.radians(180 + phi - src2.pa)),\n src2.b * np.cos(np.radians(180 + phi - src2.pa)))\n R = dist / (np.hypot(r1, r2) / 3600)\n return R\n\n\ndef sky_dist(src1, src2):\n \"\"\"\n Great circle distance between two sources.\n A check is made to determine if the two sources are the same object, in this case\n the distance is zero.\n\n Parameters\n ----------\n src1, src2 : object\n Two sources to check. 
Objects must have parameters (ra,dec) in degrees.\n\n Returns\n -------\n distance : float\n The distance between the two sources.\n\n See Also\n --------\n :func:`AegeanTools.angle_tools.gcd`\n \"\"\"\n\n if np.all(src1 == src2):\n return 0\n return gcd(src1.ra, src1.dec, src2.ra, src2.dec) # degrees\n\n\ndef pairwise_ellpitical_binary(sources, eps, far=None):\n \"\"\"\n Do a pairwise comparison of all sources and determine if they have a normalized distance within\n eps.\n\n Form this into a matrix of shape NxN.\n\n\n Parameters\n ----------\n sources : list\n A list of sources (objects with parameters: ra,dec,a,b,pa)\n\n eps : float\n Normalised distance constraint.\n\n far : float\n If sources have a dec that differs by more than this amount then they are considered to be not matched.\n This is a short-cut around performing GCD calculations.\n\n Returns\n -------\n prob : numpy.ndarray\n A 2d array of True/False.\n\n See Also\n --------\n :func:`AegeanTools.cluster.norm_dist`\n \"\"\"\n if far is None:\n far = max(a.a/3600 for a in sources)\n l = len(sources)\n distances = np.zeros((l, l), dtype=bool)\n for i in range(l):\n for j in range(i, l):\n if i == j:\n distances[i, j] = False\n continue\n src1 = sources[i]\n src2 = sources[j]\n if src2.dec - src1.dec > far:\n break\n if abs(src2.ra - src1.ra)*np.cos(np.radians(src1.dec)) > far:\n continue\n distances[i, j] = norm_dist(src1, src2) > eps\n distances[j, i] = distances[i, j]\n return distances\n\n\ndef regroup_vectorized(srccat, eps, far=None, dist=norm_dist):\n \"\"\"\n Regroup the islands of a catalog according to their normalised distance.\n\n Assumes srccat is recarray-like for efficiency.\n Return a list of island groups.\n\n Parameters\n ----------\n srccat : np.rec.arry or pd.DataFrame\n Should have the following fields[units]:\n ra[deg],dec[deg], a[arcsec],b[arcsec],pa[deg], peak_flux[any]\n eps : float\n maximum normalised distance within which sources are considered to be\n grouped\n far : float\n (degrees) sources that are further than this distance apart will not\n be grouped, and will not be tested.\n Default = 0.5.\n dist : func\n a function that calculates the distance between a source and each\n element of an array of sources.\n Default = :func:`AegeanTools.cluster.norm_dist`\n\n Returns\n -------\n islands : list of lists\n Each island contians integer indices for members from srccat\n (in descending dec order).\n \"\"\"\n if far is None:\n far = 0.5 # 10*max(a.a/3600 for a in srccat)\n\n # most negative declination first\n # XXX: kind='mergesort' ensures stable sorting for determinism.\n # Do we need this?\n order = np.argsort(srccat.dec, kind='mergesort')[::-1]\n # TODO: is it better to store groups as arrays even if appends are more\n # costly?\n groups = [[order[0]]]\n for idx in order[1:]:\n rec = srccat[idx]\n # TODO: Find out if groups are big enough for this to give us a speed\n # gain. 
If not, get distance to all entries in groups above\n # decmin simultaneously.\n decmin = rec.dec - far\n for group in reversed(groups):\n # when an island's largest (last) declination is smaller than\n # decmin, we don't need to look at any more islands\n if srccat.dec[group[-1]] < decmin:\n # new group\n groups.append([idx])\n rafar = far / np.cos(np.radians(rec.dec))\n group_recs = np.take(srccat, group, mode='clip')\n group_recs = group_recs[abs(rec.ra - group_recs.ra) <= rafar]\n if len(group_recs) and dist(rec, group_recs).min() < eps:\n group.append(idx)\n break\n else:\n # new group\n groups.append([idx])\n\n # TODO?: a more numpy-like interface would return only an array providing\n # the mapping:\n # group_idx = np.empty(len(srccat), dtype=int)\n # for i, group in enumerate(groups):\n # group_idx[group] = i\n # return group_idx\n return groups\n\n\ndef regroup(catalog, eps, far=None, dist=norm_dist):\n \"\"\"\n Regroup the islands of a catalog according to their normalised distance.\n Return a list of island groups. Sources have their (island,source) parameters relabeled.\n\n\n Parameters\n ----------\n catalog : str or object\n Either a filename to read into a source list, or a list of objects with the following properties[units]:\n ra[deg],dec[deg], a[arcsec],b[arcsec],pa[deg], peak_flux[any]\n\n eps : float\n maximum normalised distance within which sources are considered to be grouped\n\n far : float\n (degrees) sources that are further than this distance appart will not be grouped, and will not be tested.\n Default = None.\n\n dist : func\n a function that calculates the distance between two sources must accept two SimpleSource objects.\n Default = :func:`AegeanTools.cluster.norm_dist`\n\n Returns\n -------\n islands : list\n A list of islands. 
Each island is a list of sources.\n\n See Also\n --------\n :func:`AegeanTools.cluster.norm_dist`\n \"\"\"\n\n if isinstance(catalog, str):\n table = load_table(catalog)\n srccat = table_to_source_list(table)\n else:\n try:\n srccat = catalog\n _ = catalog[0].ra, catalog[0].dec, catalog[0].a, catalog[0].b, catalog[0].pa, catalog[0].peak_flux\n\n except AttributeError as e:\n log.error(\"catalog is not understood.\")\n log.error(\"catalog: Should be a list of objects with the following properties[units]:\\n\" +\n \"ra[deg],dec[deg], a[arcsec],b[arcsec],pa[deg], peak_flux[any]\")\n raise e\n\n log.info(\"Regrouping islands within catalog\")\n log.debug(\"Calculating distances\")\n\n if far is None:\n far = 0.5 # 10*max(a.a/3600 for a in srccat)\n\n srccat_array = np.rec.fromrecords(\n [(s.ra, s.dec, s.a, s.b, s.pa, s.peak_flux)\n for s in srccat],\n names=['ra', 'dec', 'a', 'b', 'pa', 'peak_flux'])\n groups = regroup_vectorized(srccat_array, eps=eps, far=far, dist=dist)\n groups = [[srccat[idx] for idx in group]\n for group in groups]\n\n islands = []\n # now that we have the groups, we relabel the sources to have (island,component) in flux order\n # note that the order of sources within an island list is not changed - just their labels\n for isle, group in enumerate(groups):\n for comp, src in enumerate(sorted(group, key=lambda x: -1*x.peak_flux)):\n src.island = isle\n src.source = comp\n islands.append(group)\n return islands\n","sub_path":"AegeanTools/cluster.py","file_name":"cluster.py","file_ext":"py","file_size_in_byte":9027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"517858364","text":"\nimport sys\nimport pandas as pd \nimport openpyxl\nimport re\n\nport1=[[5,4],[13,2]]\nport3=[[3,3],[4,1],[5,1],[5,2],[5,3],[5,4],[12,3],[12,4],[13,1],[13,3],[13,4]]\nport4=[[3,3],[5,1],[5,2],[5,3],[5,4],[10,2],[10,3],[10,4],[11,3],[11,4],[13,1]]\nport5=[[4,1],[4,3],[9,1],[10,3],[13,1],[13,2],[13,3],[14,3]]\n\nmodel=re.compile(r'\\d+')\n# global targetAll\ntargetAll=pd.DataFrame(columns=['网元名','槽位','端口','ONU索引','ONU配置类型','认证模式','认证值','vlan1','vlan2','vlan3','vlan4'])\ntmpError=pd.DataFrame()\n\ndef core(it,port):\n\tglobal targetAll\n\tglobal tmpError\n\t#targetAll=pd.DataFrame(columns=['网元名','槽位','端口','ONU索引','ONU配置类型','认证模式','认证值','vlan1','vlan2','vlan3','vlan4'])\n\tpath=\"E:\\\\workpy\\\\trans\\\\\"+str(it)+\".csv\"\n\tvlanPath=\"E:\\\\workpy\\\\trans\\\\\"+str(it)+\"-vlan.csv\"\n\tdf=pd.read_csv(path,encoding='gbk')\n\tdfVlan=pd.read_csv(vlanPath,encoding='gbk')\n\t\n\n\n\n\n#df = df.applymap(lambda x: x.decode('gbk') if isinstance(x, str) else x)\n\n# for index, row in df.iterrows():\n# if row[\"ONU配置类型\"] == 'ZTE-F420':\n# \tprint(index)\n\n# print(df[df[\"ONU配置类型\"]==\"ZTE-F420\"].head())\n#['ONU索引','ONU配置类型','认证模式','认证值']\n\tfor i in port:\n\t\t# print(i[0],i[1])\n\n\t\tdft=df[(df['槽位']==i[0])&(df['端口']==i[1])&((df['ONU配置类型']=='ZTE-F420')|(df['ONU配置类型']=='ZTE-D420'))]\n\t\tdft=dft[['网元名','槽位','端口','ONU索引','ONU配置类型','认证模式','认证值']]\n\n\t\tfor index,row in dft.iterrows():\n\t\t\t# tmpSlot=row['槽位']\n\t\t\t# tmpPort=row['端口']\n\t\t\t# tmpId=row['ONU索引']\n\t\t\t# re.find(r'\\d+',x)\n\t\t\ttry:\n\t\t\t\ttmpVlan=dfVlan.loc[(dfVlan['槽位']==row['槽位'])&(dfVlan['端口']==row['端口'])&(dfVlan['ONU ID']==row['ONU索引'])&(dfVlan['VLAN ID'].str.contains('PVID=')),['VLAN ID']]\n\t\t\t\t#tmpVlan[,['VLAN ID']]='PVID=0,TAGVLAN='\n\n\t\t\t\ttmpVlan=tmpVlan.applymap(lambda 
x:model.findall(x))#.T#.reindex(columns=vlanColumns)\n\t\t\t\ttmprow=pd.DataFrame(row.values,columns=['a'])\n\t\t\t\ttmpVlan.columns=['a']\n\t\t\t\t# print(tmprow)\n\t\t\t\t# print(tmpVlan)\n\n\t\t\t\ttg=pd.concat([tmprow,tmpVlan]).T\n\t\t\t\t#print(tg)\n\t\t\t\ttg.columns=(['网元名','槽位','端口','ONU索引','ONU配置类型','认证模式','认证值','vlan1','vlan2','vlan3','vlan4'])\n\t\t\t\ttargetAll=targetAll.append(tg)\n\t\t\t\t#print(targetAll)\n\t\t\t\t# tmpVlan.columns=['vlan1','vlan2','vlan3','vlan4']\n\t\t\t\t#targetAll.to_excel(\"sample.xlsx\",index=False)\n\t\t\texcept:\n\t\t\t\ttmpError=tmpError.append(tmprow.T)\n\n\ncore(1,port1)\ncore(3,port3)\ncore(4,port4)\ncore(5,port5)\ntargetAll.to_excel(\"sample.xlsx\",index=False)\ntmpError.to_excel(\"error.xlsx\",index=False)\n\n\n#dft.to_excel(\"sample.xlsx\",index=False)\n\n\n","sub_path":"transtest.py","file_name":"transtest.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"347543049","text":"#to control the servo\n\nimport RPi.GPIO as GPIO \nimport time \nimport sys\n\n\ndef move(angle, timer):\n\tGPIO.setmode(GPIO.BCM)\n\tGPIO.setup(17, GPIO.OUT)\n\tpwm=GPIO.PWM(17, 50)\n\tpwm.start(5)\n\tGPIO.output(17, True)\n\ttime.sleep(0.1)\n\tduty = 1/18*(int(angle))+2\n\twhile(timer > 0):\n\t\tpwm.ChangeDutyCycle(duty)\n\t\ttimer = timer - 1\n\t\ttime.sleep(0.001)\n\ttime.sleep(2)\n\tpwm.stop()\n\tGPIO.cleanup()\n\n\n#move(sys.argv[1])\nmove(15, 10)\nmove(60, 100)\n\n\n\n","sub_path":"archive/servo_button.py","file_name":"servo_button.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"495167776","text":"from django.contrib.auth.models import User, Group\nfrom rest_framework import serializers\nfrom .models import ApartmentModel\n\n\nclass ApartmentSerializer(serializers.ModelSerializer):\n class Meta:\n model = ApartmentModel\n fields = [\n \t'apartment_price', 'number_of_rooms', 'apartment_floor', \n \t'year_of_construction', 'apartment_adress', 'id', 'building_floors'\n ]\n\n\n","sub_path":"apartments/apartments_api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"426921952","text":"from operator import attrgetter\nfrom typing import Dict, List\n\nfrom django.core.cache import cache\nfrom django.urls import reverse\n\nfrom pytest import importorskip\n\nfrom MangAdventure.utils import natsort\n\nfrom reader.models import Series\n\nfrom .base import MangadvTestBase\nfrom .utils import get_test_image, get_valid_zip_file\n\n\nclass MangadvViewTestBase(MangadvTestBase):\n def setup_method(self):\n super().setup_method()\n series = Series.objects.create(title='series', cover=get_test_image())\n series.aliases.create(name='first series')\n author = series.authors.create(name='Author')\n author.aliases.create(name='author1')\n artist = series.artists.create(name='Artist')\n artist.aliases.create(name='artist1')\n series.categories.create(name='Manga')\n category = series.categories.create(name='Adventure')\n series.chapters.create(\n title='chapter', number=1, file=get_valid_zip_file()\n )\n\n series2 = Series.objects.create(title='series2', status='completed')\n author2 = series2.authors.create(name='Author 2')\n author2.aliases.create(name='author2')\n artist2 = series2.artists.create(name='Artist 2')\n 
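# series2 below is the 'completed' series; the status and category\n        # filter tests further down rely on it next to the ongoing 'series'.\n        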
artist2.aliases.create(name='artist2')\n series2.chapters.create(title='chapter', number=1, final=True)\n series2.categories.create(name='Yaoi')\n series2.categories.add(category)\n\n def teardown_method(self):\n super().teardown_method()\n cache.clear()\n\n\nclass TestIndex(MangadvViewTestBase):\n URL = reverse('index')\n\n def test_get(self):\n r = self.client.get(self.URL)\n assert r.status_code == 200\n\n def test_csp(self):\n importorskip('csp', reason='requires django-csp')\n r = self.client.get(self.URL)\n assert r.status_code == 200\n assert 'Content-Security-Policy' in r\n assert 'unsafe-inline' not in r['Content-Security-Policy']\n\n\nclass TestSearch(MangadvViewTestBase):\n URL = reverse('search')\n\n def _test_filter(self, params: Dict[str, str] = {},\n results: List[str] = []):\n cache.clear()\n r = self.client.get(self.URL, params)\n assert r.status_code == 200\n if bool(params and results):\n values = map(attrgetter('title'), r.context['results'])\n assert natsort(values) == results\n\n def test_get_simple(self):\n self._test_filter()\n\n def test_get_query(self):\n self._test_filter({'q': 'first'}, ['series'])\n\n def test_get_author(self):\n self._test_filter({'author': 'author1'}, ['series'])\n self._test_filter({'author': 'artist1'}, ['series'])\n self._test_filter({'author': 'author2'}, ['series2'])\n self._test_filter({'author': 'artist2'}, ['series2'])\n\n def test_get_status(self):\n self._test_filter({'status': ''}, [])\n self._test_filter({'status': 'ongoing'}, ['series'])\n self._test_filter({'status': 'completed'}, ['series2'])\n self._test_filter({'status': 'any'}, ['series', 'series2'])\n\n def test_get_categories(self):\n self._test_filter({'categories': 'adventure'}, ['series', 'series2'])\n self._test_filter({'categories': 'manga'}, ['series'])\n self._test_filter({'categories': 'yaoi'}, ['series2'])\n self._test_filter({'categories': '-yaoi,adventure'}, ['series'])\n\n\nclass TestOpenSearch(MangadvViewTestBase):\n URL = reverse('opensearch')\n\n def test_get(self):\n r = self.client.get(self.URL)\n assert r.status_code == 200\n assert r['Content-Type'] == 'application/opesearchdescription+xml'\n assert 'MangAdventure' in str(r.content)\n\n\nclass TestContribute(MangadvViewTestBase):\n URL = reverse('contribute')\n\n def test_get(self):\n r = self.client.get(self.URL)\n assert r.status_code == 200\n assert r['Content-Type'] == 'application/json'\n assert r.json()['name'] == 'MangAdventure'\n\n\nclass TestManifest(MangadvViewTestBase):\n URL = reverse('manifest')\n\n def test_get(self):\n r = self.client.get(self.URL)\n assert r.status_code == 200\n assert r['Content-Type'] == 'application/manifest+json'\n assert r.json()['name'] == 'MangAdventure'\n","sub_path":"MangAdventure/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":4265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"264487664","text":"import os\nimport shutil\nimport sys\n\nsys.path.append(os.path.dirname(os.path.realpath(__file__)))\nimport hpms_utils\n\n\ndef backup(output_path):\n import os\n if not os.path.isdir(output_path):\n return\n from datetime import datetime\n today = str(datetime.today().strftime('%Y%m%d%H%M%S'))\n bak_scripts_path = output_path + \"_\" + today\n shutil.make_archive(bak_scripts_path, 'zip', output_path)\n import hpms_utils\n hpms_utils.debug(\"Last project build backup done\")\n\n\ndef unzip_templates(output_path):\n shutil.unpack_archive(hpms_utils.get_current_dir() + 
\"/templates/data.zip\", output_path, 'zip')\n\n\ndef create_empty_project(output_path):\n hpms_utils.debug(\"Creating default project template.\")\n if os.path.isdir(output_path):\n shutil.rmtree(output_path, ignore_errors=True)\n os.makedirs(output_path)\n unzip_templates(output_path)\n\n\ndef main():\n import datetime\n try:\n print(\"\\n\\n\\n---------------------------------------------\")\n print(\"----------------- STARTED -------------------\")\n print(\"---------------------------------------------\\n\")\n hpms_utils.system(\"HPMS batch starting\")\n starting = datetime.datetime.now()\n start()\n ending = datetime.datetime.now() - starting\n hpms_utils.system(\"HPMS batch completed successfully in \" + str(ending.total_seconds()) + \" seconds\")\n print(\"\\n---------------------------------------------\")\n print(\"----------------- FINISHED ------------------\")\n print(\"---------------------------------------------\\n\\n\\n\")\n except Exception as e:\n hpms_utils.severe(\"Unexpected error: \" + str(e))\n hpms_utils.system(\"HPMS batch aborted\")\n print(\"\\n---------------------------------------------\")\n print(\"----------------- ABORTED -------------------\")\n print(\"---------------------------------------------\\n\\n\\n\")\n\n\ndef start():\n import argparse\n\n argv = sys.argv\n\n if \"--\" not in argv:\n argv = []\n else:\n argv = argv[argv.index(\"--\") + 1:]\n\n usage_text = (\n \"Run HPMS project builder with this script:\"\n \" blender --background --python \" + __file__ + \" -- [options]\"\n )\n\n parser = argparse.ArgumentParser(description=usage_text)\n\n parser.add_argument(\n \"-v\", \"--logging-level\", dest=\"logging\",\n help=\"Setting logging level (severe if not specified).\",\n )\n\n parser.add_argument(\n \"-o\", \"--output\", dest=\"output_path\", metavar='FILE',\n help=\"Generate HPMS into specified output path.\",\n )\n\n parser.add_argument(\n \"-c\", \"--cleanup\", dest=\"cleanup\",\n help=\"Cleanup the output directory (update otherwise).\",\n )\n\n parser.add_argument(\n \"-r\", \"--render\", dest=\"render\",\n help=\"Render missing screens and masks.\",\n )\n\n parser.add_argument(\n \"-l\", \"--roomupdate-list\", dest=\"roomupdate_list\",\n help=\"Force update for rooms in given list (comma separated).\",\n )\n\n parser.add_argument(\n \"-a\", \"--roomupdate-all\", dest=\"roomupdate_all\",\n help=\"Force update for all rooms.\",\n )\n\n parser.add_argument(\n \"-p\", \"--preview\", dest=\"preview\",\n help=\"Improve rendering speed with only 16 samples.\",\n )\n\n args = parser.parse_args(argv)\n\n if not argv:\n parser.print_help()\n return\n\n hpms_utils.set_log_level(args.logging)\n\n if not args.output_path:\n hpms_utils.severe(\"Parameter '--outputpath' is missing, aborting\")\n parser.print_help()\n return\n\n if not os.path.isdir(args.output_path):\n create_empty_project(args.output_path)\n else:\n backup(args.output_path)\n\n if args.cleanup is not None and args.cleanup.lower() in [\"t\", \"true\", \"y\", \"yes\"]:\n hpms_utils.debug(\"Re-building project.\")\n create_empty_project(args.output_path)\n\n update_all = args.roomupdate_all is not None and args.roomupdate_all.lower() in [\"t\", \"true\", \"y\", \"yes\"]\n do_render = args.render is not None and args.render.lower() in [\"t\", \"true\", \"y\", \"yes\"]\n preview = args.preview is not None and args.preview.lower() in [\"t\", \"true\", \"y\", \"yes\"]\n room_list = []\n if args.roomupdate_list is not None:\n room_list = args.roomupdate_list.split(\",\")\n import 
hpms_exporter\n hpms_exporter.export_room_data(args.output_path, room_list, update_all, do_render, preview)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"batch/bdata/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"76464449","text":"from django.contrib.auth import get_user_model\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.urls import reverse\nfrom django.views.generic import UpdateView\n\nfrom users.forms import KitchenForm, MenuForm\nfrom .models import Provider, Kitchen, MenuItem\nfrom django.http import Http404\n\n\n# Create your views here.\n\n\ndef kitchen_list(request):\n kitchens = Kitchen.objects.all()\n\n menuitems = []\n\n if not request.user.first_name:\n user = request.user.username\n\n provider = Provider.objects.get(name=user)\n\n kitchens = Kitchen.objects.get(provider=provider)\n\n menuitems = MenuItem.objects.filter(kitchen=kitchens)\n\n return render(request, 'list.html', {'kitchens': kitchens, 'menuitems': menuitems})\n\n\ndef kitchen_detail(request, kitchen_name):\n try:\n kitchen = Kitchen.objects.get(name=kitchen_name)\n menuitems = MenuItem.objects.filter(kitchen=kitchen)\n except Kitchen.DoesNotExist:\n raise Http404(\"Kitchen Does Not Exist\")\n\n return render(request, 'detail.html', {'kitchen': kitchen, 'menuitems': menuitems})\n\n\nclass EditKitchenView(UpdateView):\n model = Kitchen\n form_class = KitchenForm\n template_name = \"kitchen_edit.html\"\n\n def get_object(self):\n user = self.request.user.username\n\n provider = Provider.objects.get(name=user)\n\n kitchen = Kitchen.objects.get(provider=provider)\n\n return kitchen\n\n def get_success_url(self):\n return reverse('kitchen_list')\n\n\ndef remove_item(request, menuitem_id):\n MenuItem.objects.filter(id=menuitem_id).delete()\n\n return redirect('kitchen_list')\n\n\ndef edit_item(request, menuitem_id):\n\n menuitem = MenuItem.objects.get(id=menuitem_id)\n form = MenuForm(request.POST or None,instance=menuitem)\n\n if form.is_valid():\n menuitem=form.save(commit=False)\n menuitem.save()\n return redirect('kitchen_list')\n\n return render(request, 'item_edit.html', {'form': form})\n\n\ndef add_item(request):\n\n if request.method == 'POST':\n form = MenuForm(request.POST)\n\n if form.is_valid():\n user = request.user.username\n\n provider = Provider.objects.get(name=user)\n\n kitchen = Kitchen.objects.get(provider=provider)\n\n menuitem=form.save(commit=False)\n menuitem.kitchen = kitchen\n menuitem.save()\n return redirect('kitchen_list')\n else:\n form = MenuForm()\n\n return render(request, 'item_add.html', {'form': form})\n","sub_path":"kitchen/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"597983539","text":"import os\nimport subprocess\nfrom pathlib import Path\nimport platform\n\n\nPLATFORM = platform.system().lower()\n\nSCRIPTS_DIR = Path(__file__).parent\nROOT = SCRIPTS_DIR.parent\n\nARTIFACTS = ROOT / \"_artifacts\"\nCACHE = ROOT / \"_cache\"\n\nCONSTRUCT_DIR = ROOT / \"constructor\"\n\nRECIPE_DIR = ROOT / \"recipes\"\nCONDA_CACHE = CACHE / \"conda-bld\"\n\nCONSTRUCT_CACHE = CACHE / \"constructor\"\nCONSTRUCT = CONSTRUCT_DIR / \"construct.yaml\"\n\nTEST_DIR = ROOT / \"tests\"\n\nCONDA_OUT = ARTIFACTS / \"conda-bld\"\nCONSTRUCT_OUT = ARTIFACTS / \"constructor\"\nTEST_OUT = ARTIFACTS / 
\"test_output\"\n\nROBOTLAB_DIR = ROOT / \"robotlab\"\n\nREADME = ROOT / \"README.md\"\n\n# for easy overriding in CI\nPY_MIN = os.environ.get(\"PY_MIN\", \"3.6\")\nPY_MAX = os.environ.get(\"PY_MAX\", \"3.7\")\nNODE_MIN = os.environ.get(\"NODE_MIN\", \"8\")\nNODE_MAX = os.environ.get(\"NODE_MAX\", \"9\")\nRF_VERSION = os.environ.get(\"ROBOTFRAMEWORK_VERSION\", \"3.1\")\nVERSION = os.environ.get(\"ROBOTLAB_VERSION\", \"0.9.0\")\nCHROMEDRIVER_VERSION = os.environ.get(\"CHROMEDRIVER_VERSION\", \"2.45\")\nIPYWIDGETS_VERSION = os.environ.get(\"CHROMEDRIVER_VERSION\", \"7.4.2\")\n\n\ndef run(args, **kwargs):\n \"\"\" Probably unneccessary \"convenience\" wrapper\n \"\"\"\n p = subprocess.Popen(list(map(str, args)), **kwargs)\n\n try:\n p.wait()\n except KeyboardInterrupt as err:\n p.kill()\n raise err\n\n return p.returncode\n","sub_path":"scripts/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"153639066","text":"import argparse\n\nimport retro\nimport numpy as np\nimport time\nimport random\nimport pickle\nimport os\nfrom multiprocessing import Pool, cpu_count\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-l', '--level', type=int, default=1, help=\"the starting level\")\nparser.add_argument('-r', '--random_run_times', type=int, default=1, help=\"rollout running times\")\nparser.add_argument('-s', '--single_state_run_times', type=int, default=20, help=\"expansion times in each tree level\")\nargs = parser.parse_args()\n\nmax_enemies = [-1, 3, 4, 4, 6, 4, 4, 4, 4, 7, 7, 7, 6, 7, 7, 7, 7, 7, 6, 7, 5, 7, 4, 5, 7, 6, 7, 7, 7, 6, 7, 7, 7, 6,\n 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 7, 7, 7, 6, 7, 6, 7, 7, 7, 7, 7, 6, 5, 7, 7, 4, 6, 7, 4, 6, 7, 7, 5,\n 7, 7, 5, 7, 7, 7, 4, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 4, 7, 7, 7, 4, 6, 7, 7, 7, 7, 5, 6, 7, 7, 7]\naction_space_size = 6\nrandom_run_times = args.random_run_times\nexploration_const = np.sqrt(0.5)\n# level_up_collection_number = int(1e3)\nloop_times_per_state = args.single_state_run_times\nlevel_up_infos = []\n\nclass State:\n def __init__(self, game_name='BubbleBobble-Nes', start_lvl=1, env=None, acts=None, root=None, parent=None):\n # root node init.\n if acts is None:\n acts = []\n if root is None:\n root = self\n if env is None:\n env = retro.make(game=game_name, state='Level{:02d}'.format(start_lvl))\n env.reset()\n # game info.\n self.game_name = env.gamename\n self.start_lvl = start_lvl\n self.root = root\n self.acts = acts\n self.env_state = env.em.get_state()\n self.enemies, self.level, self.lives, self.score = env.data.lookup_all().values()\n env.close()\n # state eval.\n self.terminal = 0\n self.value = 0.\n if self.acts:\n self.terminal_cal()\n self.value_cal()\n # if self.terminal == 2:\n # print('Level up from level {}.'.format(self.start_lvl))\n # print('acts:', acts)\n # tree info.\n self.parent = parent\n self.children = [None] * action_space_size\n self.visit_times = 0\n self.mean_score = 0\n # print(self)\n\n def terminal_cal(self): # 0:Non-terminal, 1:lives down, 2:level up\n if self.lives < self.root.lives:\n self.terminal = 1\n elif self.level > self.root.level:\n self.terminal = 2\n else:\n self.terminal = 0\n\n def value_cal(self):\n if self.terminal == 1:\n self.value = -1.\n elif self.terminal == 2:\n self.value = 1.\n else:\n self.value = 1. 
- (self.enemies / max_enemies[self.level]) ** 2\n\n def selection_p(self):\n if self.terminal:\n return 0.\n return self.mean_score + exploration_const * np.sqrt(np.log(self.parent.visit_times) / self.visit_times)\n\n def full_children(self):\n return all(self.children)\n\n def best_children(self):\n best_children_index = np.argmax([c.selection_p() for c in self.children])\n return self.children[best_children_index]\n\n def selection(self):\n cur = self\n while cur:\n if cur.full_children():\n p_list = [c.selection_p() for c in cur.children]\n cur = cur.children[uneven_random(p_list)]\n else:\n return cur.next() # expansion, simulation, and back-propagation\n\n def next(self): # expansion in standard MCTS\n choice_array = [i for i in range(action_space_size) if not self.children[i]]\n action_index = random.choice(choice_array)\n action_buffer = action(action_index)\n env = retro.make(game=self.game_name)\n env.reset()\n env.em.set_state(self.env_state)\n while action_buffer:\n obs, rew, done, info = env.step(action_buffer.pop())\n next_state = State(self.game_name, self.start_lvl, env, self.acts + [action_index], self.root, self)\n next_state.random_run()\n self.children[action_index] = next_state\n return next_state\n\n def random_run(self): # simulation & back-propagation in standard MCTS\n env = retro.make(game=self.game_name)\n env.reset()\n res_list = []\n for i in range(random_run_times):\n action_done = []\n action_buffer = []\n action_index = None\n env.reset()\n env.em.set_state(self.env_state) # start from the current state\n terminal = False\n while not terminal:\n if not action_buffer:\n if action_index is None: # first step\n action_index = random.randint(0, 5)\n action_buffer += action(action_index)\n else:\n action_done.append(action_index)\n new_enemies, new_level, new_lives, new_score = env.data.lookup_all().values()\n if new_level > self.root.level or new_lives < self.root.lives: # level up or lives down, terminal\n terminal_info = (self.acts + action_done, new_enemies, new_level, new_lives, new_score)\n res_list.append(terminal_info)\n if new_level > self.level: # record level-up infos\n # print(terminal_info)\n level_up_infos.append(terminal_info)\n terminal = True\n action_index = random.randint(0, 5)\n action_buffer += action(action_index)\n obs, rew, done, info = env.step(action_buffer.pop())\n env.close()\n # back-propagation\n cur = self\n while cur:\n terminal_score_mean = sum(info[-1] for info in res_list) / random_run_times\n cur.visit_times, cur.mean_score = cur.visit_times + 1, (\n cur.visit_times * cur.mean_score + terminal_score_mean) / (cur.visit_times + 1)\n cur = cur.parent\n\n def __eq__(self, other):\n return self.start_lvl == other.start_lvl and self.acts == other.acts\n\n def __hash__(self):\n return hash(str(self.start_lvl) + str(self.acts))\n\n def __repr__(self):\n return 'Env starting from level {}, using {} steps, current state: enemies={}, level={}, lives={}, score={}, ' \\\n 'parent id={}' \\\n .format(self.start_lvl, len(self.acts), self.enemies, self.level, self.lives, self.score, hash(self.parent))\n\n\ndef uneven_random(p_list):\n range_list = [0]\n for p in p_list:\n range_list.append(range_list[-1] + p)\n rand_num = random.uniform(0, range_list[-1])\n for i in range(len(p_list)):\n if range_list[i] <= rand_num < range_list[i + 1]:\n return i\n return len(p_list) - 1\n\n\ndef action_one_step(n):\n # 0: B(Fire), 1: Left, 2: Right, 3: A(Jump), 4: Left Jump, 5: Right Jump\n if n == 0:\n return np.int8([1, 0, 0, 0, 0, 0, 0, 0, 0])\n if n == 
1:\n return np.int8([1, 0, 0, 0, 0, 0, 1, 0, 0])\n if n == 2:\n return np.int8([1, 0, 0, 0, 0, 0, 0, 1, 0])\n if n == 3:\n return np.int8([1, 0, 0, 0, 0, 0, 0, 0, 1])\n if n == 4:\n return np.int8([1, 0, 0, 0, 0, 0, 1, 0, 1])\n if n == 5:\n return np.int8([1, 0, 0, 0, 0, 0, 0, 1, 1])\n return np.int8([0, 0, 0, 0, 0, 0, 0, 0, 0])\n\n\ndef action(n, repeat=5):\n # 0: B(Fire), 1: Left, 2: Right, 3: A(Jump), 4: Left Jump, 5: Right Jump\n return ([action_one_step(-1)] + [action_one_step(n)]) * repeat\n\n\ndef retro_search(process_id, game_name='BubbleBobble-Nes', level=1):\n print(process_id, 'started.')\n root_state = State(game_name=game_name, start_lvl=level)\n cur_state = root_state\n best_score = 0\n while not cur_state.terminal:\n print('cur_state', cur_state)\n for _ in range(loop_times_per_state):\n cur_state.selection()\n cur_state = cur_state.best_children()\n if level_up_infos:\n best_index = int(np.argmax([info[-1] for info in level_up_infos]))\n cur_best_score = level_up_infos[best_index][-1]\n print(process_id, level_up_infos[best_index][1:])\n # print(level_up_infos[best_index][0])\n if cur_best_score > best_score:\n best_score = cur_best_score\n cur_acts = level_up_infos[best_index][0]\n cur_level = level_up_infos[best_index][2]\n file_name = 'saved_acts_MCTS/lvl{}to{}_score{}_acts.pickle'.format(level, cur_level, best_score)\n with open(file_name, 'wb') as handle:\n pickle.dump(cur_acts, handle)\n print(process_id, 'exited.')\n\n\nif __name__ == '__main__':\n os.makedirs('saved_acts_MCTS', exist_ok=True)\n retro_search(0, level=args.level)\n # cpu_num = cpu_count()\n # pool_size = cpu_num // 2\n # search_level_list = range(args.level, 100)\n # search_level_num = len(search_level_list)\n # p = Pool(pool_size)\n # os.makedirs('saved_acts_MCTS', exist_ok=True)\n # for i in range(search_level_num):\n # p.apply_async(retro_search, (i + 1, 'BubbleBobble-Nes', i + 1))\n # p.close()\n # p.join()\n","sub_path":"retro_MCTS.py","file_name":"retro_MCTS.py","file_ext":"py","file_size_in_byte":9227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"376905800","text":"import tensorflow as tf\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\ndef get_params(sess):\r\n model_params = sess.run(tf.trainable_variables())\r\n return model_params\r\n\r\ndef set_params(model_params, sess):\r\n if model_params is not None: \r\n all_vars = tf.trainable_variables()\r\n for variable, value in zip(all_vars, model_params):\r\n variable.load(value, sess)\r\n\r\ndef aggregate(wsolns):\r\n total_weight = 0.0\r\n base = [0]*len(wsolns[0][1])\r\n for (w, soln) in wsolns: # w is the number of local samples\r\n total_weight += w\r\n for i, v in enumerate(soln):\r\n base[i] += w*v.astype(np.float32)\r\n\r\n averaged_soln = [v / total_weight for v in base]\r\n\r\n return averaged_soln\r\ndef train(model_instance, train_source_loader_client1, train_source_loader_client2, train_target_loader_client1, train_target_loader_client2, test_target_loader,\r\n max_iter, num_local_steps, optimizer, lr, decay_epoch, lr_placeholder, eval_interval, batch_eval, lambda1, lambdat, lambda1_decay, batch_size, sess):\r\n model_instance.set_train(True)\r\n model = model_instance\r\n print(\"start train...\")\r\n iter_num = 0\r\n epoch = 0\r\n# total_progress_bar = tqdm.tqdm(desc='Train iter', total=max_iter)\r\n label_acc = model_instance.label_acc\r\n train_op = model_instance.loss_training\r\n X0, y0 = next(train_target_loader_client1)\r\n X1, y1 = 
next(train_source_loader_client1)\r\n client1_target_batch_size = np.shape(X0)[0]\r\n client1_source_batch_size = np.shape(X1)[0]\r\n client1_domain_labels = np.concatenate([np.tile([0., 1.], [client1_target_batch_size, 1]), np.tile([1., 0.], [client1_source_batch_size, 1])], axis =0)\r\n X2, y2 = next(train_target_loader_client2)\r\n X3, y3 = next(train_source_loader_client2)\r\n client2_target_batch_size = np.shape(X2)[0]\r\n client2_source_batch_size = np.shape(X3)[0]\r\n client2_domain_labels = np.concatenate([np.tile([0., 1.], [client2_target_batch_size, 1]), np.tile([1., 0.], [client2_source_batch_size, 1])], axis =0)\r\n\r\n client_params = []\r\n train_record = []\r\n variable_set = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\r\n for i in range(2):\r\n client_model = []\r\n for var in variable_set:\r\n client_model.append(np.zeros(shape = var.get_shape().as_list(), dtype = np.float32))\r\n client_params.append(client_model)\r\n\r\n\r\n lr_t = lr\r\n latest_model = get_params(sess)\r\n for i in range(max_iter):\r\n# print('====================Round {0}===================='.format(i)) \r\n # Adaptation param and learning rate schedule as described in the paper\r\n\r\n lambdat_i = lambda1 * (lambda1_decay ** (i/100))\r\n\r\n \r\n\r\n\r\n # Training step\r\n if i == decay_epoch[0] or i == decay_epoch[1]:\r\n lr_t = lr_t * 0.1\r\n set_params(latest_model, sess)\r\n optimizer.set_params(latest_model, sess)\r\n\r\n# sess.run(grads_zeros_ops)\r\n for j in range(num_local_steps):\r\n X0, y0 = next(train_target_loader_client1)\r\n X1, y1 = next(train_source_loader_client1)\r\n X = np.concatenate([X0, X1], axis=0)\r\n# print(y0)\r\n _ = sess.run(\r\n [train_op],\r\n feed_dict={model.inputs: X, model.labels: y1, model.domain: client1_domain_labels,\r\n lr_placeholder: lr_t, lambdat:lambda1 })\r\n\r\n client_params[0] = get_params(sess)\r\n\r\n\r\n set_params(latest_model, sess)\r\n\r\n optimizer.set_params(latest_model, sess)\r\n# sess.run(grads_zeros_ops)\r\n for j in range(num_local_steps):\r\n X2, y2 = next(train_target_loader_client2)\r\n X3, y3 = next(train_source_loader_client2)\r\n X = np.concatenate([X2, X3], axis=0)\r\n _= sess.run(\r\n [train_op],\r\n feed_dict={model.inputs: X, model.labels: y3, model.domain: client2_domain_labels,\r\n lr_placeholder: lr_t, lambdat:lambda1} )\r\n client_params[1] = get_params(sess)\r\n\r\n latest_model = aggregate([(w , soln) for w, soln in zip(np.ones(2)/2, client_params)])\r\n if i % eval_interval == 0:\r\n print('====================Round {0}===================='.format(i)) \r\n# source_acc = sess.run(label_acc,\r\n# feed_dict={model.X: mnist_test, model.y: mnist.test.labels,\r\n# model.train: False})\r\n model_instance.set_train(False)\r\n if batch_eval :\r\n target_acc_all = 0\r\n for k in range(10):\r\n target_test_data, target_test_label = next(test_target_loader)\r\n # print(np.shape(target_test_data))\r\n # print(np.shape(target_test_label))\r\n target_acc = sess.run(label_acc,\r\n feed_dict={model.inputs: target_test_data, model.labels: target_test_label,\r\n })\r\n target_acc_all+= target_acc\r\n target_acc_all = target_acc_all / 10\r\n else :\r\n target_test_data, target_test_label = test_target_loader\r\n target_acc_all = sess.run(label_acc,\r\n feed_dict={model.inputs: target_test_data, model.labels: target_test_label,\r\n })\r\n model_instance.set_train(True)\r\n# print('Source (MNIST) accuracy:', source_acc)\r\n print('Target accuracy:', target_acc_all)\r\n train_record.append({'acc': target_acc_all})\r\n\r\n # val\r\n return 
train_record\r\n\r\n # print('finish train')\r\n # train_df = pd.DataFrame.from_records(train_record)\r\n # train_df.to_csv('acc_result/fedpd_acc.csv')","sub_path":"trainer/fedprox.py","file_name":"fedprox.py","file_ext":"py","file_size_in_byte":5747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"191960713","text":"\"\"\"Module with methods to handle extraction of zip files.\"\"\"\nimport subprocess\n\n\ndef unzip(source: str, destination: str):\n \"\"\"Extract given compress file from given source to given destination.\n\n Parameters\n ---------------------------\n source: str,\n The compress file.\n destination: str,\n The destination for the compress file.\n \"\"\"\n command = \"unzip -q {source} -d {destination}\".format(\n source=source,\n destination=destination\n )\n subprocess.call(command, shell=True)\n","sub_path":"ensmallen_experiments/utils/unzip.py","file_name":"unzip.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"5152582","text":"\"\"\"\ntest init.py\n\"\"\"\nimport unittest\nfrom init import *\n\n\nclass TestInit(unittest.TestCase):\n \"\"\"test init.py\"\"\"\n\n def test_init_map1(self):\n \"\"\"test init_map method\"\"\"\n if init_map(10, 10) == -1:\n sign = init_map(10, 10)\n else:\n sign = 1\n self.assertEqual(1, sign)\n\n def test_init_map2(self):\n \"\"\"test init_map method\"\"\"\n if init_map(0, 0) == -1:\n sign = init_map(0, 0)\n else:\n sign = 1\n self.assertEqual(-1, sign)\n\n def test_init_map3(self):\n \"\"\"test init_map method\"\"\"\n if init_map(-1, -1) == -1:\n sign = init_map(-1, -1)\n else:\n sign = 1\n self.assertEqual(-1, sign)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test_init.py","file_name":"test_init.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"284686849","text":"import unittest\n\nimport properties\n\nfrom tempfile import NamedTemporaryFile\nimport os\n\nclass SimpleTest(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def test_SimpleProperties(self):\n f = NamedTemporaryFile(delete=False)\n f.write(\"foo = bar\" + \"\\n\")\n f.write(\"this = is a test of space-containing values\" + \"\\n\")\n f.write(\"fools : rush in\" + \"\\n\")\n f.write(\"more = lines\" + \"\\n\")\n f.write(\"space = around the separator\" + \"\\n\")\n f.write(\"this-line-has-no-value\" + \"\\n\")\n f.close()\n\n reader = properties.Properties()\n reader.read(f.name)\n\n self.assertEqual(reader['foo'], 'bar')\n self.assertEqual(reader['this'], 'is a test of space-containing values')\n self.assertEqual(reader['fools'], 'rush in')\n self.assertEqual(reader['more'], 'lines')\n self.assertEqual(reader['space'], 'around the separator')\n self.assertEqual(reader['this-line-has-no-value'], '')\n\n os.unlink(f.name)\n self.assertFalse(os.path.exists(f.name))\n\n\n def test_MissingProperties(self):\n f = NamedTemporaryFile(delete=False)\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.close()\n\n reader = properties.Properties()\n reader.read(f.name)\n\n self.assertEqual(reader['why'], None)\n self.assertEqual(reader['would anything'], None)\n self.assertEqual(reader['be : in'], None)\n self.assertEqual(reader['the = reader'], None)\n self.assertEqual(reader['????'], None)\n\n os.unlink(f.name)\n 
self.assertFalse(os.path.exists(f.name))\n\n def test_SpaceSeparatedProperties(self):\n f = NamedTemporaryFile(delete=False)\n f.write(\"foo bar\" + \"\\n\")\n f.write(\"if there isn't a colon or equals, the first space will be the separator\" + \"\\n\")\n f.close()\n\n reader = properties.Properties()\n reader.read(f.name)\n\n self.assertEqual(reader['foo'], 'bar')\n self.assertEqual(reader['if'], 'there isn\\'t a colon or equals, the first space will be the separator')\n\n os.unlink(f.name)\n self.assertFalse(os.path.exists(f.name))\n\n\n def test_CommentsAndEmptyLines(self):\n f = NamedTemporaryFile(delete=False)\n f.write(\"! comment\" + \"\\n\")\n f.write(\"# comment starting with poind sign\" + \"\\n\")\n f.write(\"###### indentation : doesn't matter\" + \"\\n\")\n f.write(\"!! !!! !!! hello = bozo\" + \"\\n\")\n f.write(\"\" + \"\\n\")\n f.write(\"\" + \"\\n\")\n f.write(\"! hiding = good\" + \"\\n\")\n f.write(\"\" + \"\\n\")\n f.close()\n\n reader = properties.Properties()\n reader.read(f.name)\n\n self.assertEqual(reader['hello'], None)\n self.assertEqual(reader['indentation'], None)\n self.assertEqual(reader['hiding'], None)\n\n os.unlink(f.name)\n self.assertFalse(os.path.exists(f.name))\n\n\n def test_Indentation(self):\n f = NamedTemporaryFile(delete=False)\n f.write(\" ! indented comment\" + \"\\n\")\n f.write(\" # indented comment starting with poind sign\" + \"\\n\")\n f.write(\" indentation : doesn't matter\" + \"\\n\")\n f.write(\" hello = bozo\" + \"\\n\")\n f.close()\n\n reader = properties.Properties()\n reader.read(f.name)\n\n self.assertEqual(reader['hello'], 'bozo')\n self.assertEqual(reader['indentation'], 'doesn\\'t matter')\n\n os.unlink(f.name)\n self.assertFalse(os.path.exists(f.name))\n\n\n","sub_path":"test/simple_test.py","file_name":"simple_test.py","file_ext":"py","file_size_in_byte":3603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"445889070","text":"modules = []\nwith open('input.txt') as f:\n modules = [int(x.strip()) for x in f.readlines()]\n\ndef fuel(x):\n tmp = 0\n modfuel = 0\n while tmp > -1:\n tmp = int(x/3)-2\n if tmp > 0:\n modfuel += tmp\n x = tmp\n return modfuel\n\ndef runfuel():\n totalfuel = 0\n for x in modules:\n totalfuel += fuel(x)\n return totalfuel\n\nif __name__ == '__main__':\n print('Part 1: ', sum([int(x/3-2) for x in modules]))\n print('Part 2: ', runfuel())\n","sub_path":"day1.py","file_name":"day1.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"331727549","text":"from decimal import Decimal\nfrom django.conf import settings\nfrom django.conf.urls import patterns, url\nfrom django.contrib import admin\nfrom django.db.models.aggregates import Sum, Count\nfrom django.utils.http import urlencode\nfrom django.utils.translation import ugettext as _\nfrom django.core.urlresolvers import reverse\n\nfrom djchoices import DjangoChoices, ChoiceItem\n\nfrom bluebottle.payments.models import OrderPayment\nfrom bluebottle.utils.utils import StatusDefinition\n\nfrom .models import BankTransactionCategory\nfrom .signals import match_transaction_with_payout\nfrom ..csvimport.admin import IncrementalCSVImportMixin\n\nfrom .models import BankTransaction, RemoteDocdataPayment, RemoteDocdataPayout, BankTransactionTenant\nfrom .forms import (\n BankTransactionImportForm, DocdataPaymentImportForm,\n update_remote_docdata_status,\n bulk_update_remote_docdata_spanning_multiple_weeks\n)\nfrom 
.admin_extra import (\n DocdataPaymentMatchedListFilter, OrderPaymentMatchedListFilter,\n OrderPaymentIntegrityListFilter, IntegrityStatusListFilter\n)\nfrom .admin_views import (\n UnknownTransactionView, CreateProjectPayoutJournalView,\n CreateOrganizationPayoutJournalView, CreateManualDonationView,\n RetryPayoutView, RDPTakeCutView\n)\n\n\nadmin.site.register(BankTransactionCategory)\n\nadmin.site.register(BankTransactionTenant)\n\nclass BankTransactionAdmin(IncrementalCSVImportMixin, admin.ModelAdmin):\n date_hierarchy = 'book_date'\n\n actions = ('find_matches', )\n\n search_fields = [\n 'counter_account', 'counter_name',\n 'description1', 'description2', 'description3', 'description4',\n 'description5', 'description6',\n 'amount', 'status_remarks'\n ]\n\n list_display = [\n 'book_date', 'counter_name', 'counter_account', 'credit_debit', 'amount', 'status',\n 'status_remarks', 'show_actions', 'category', 'tenant'\n ]\n\n list_filter = [\n 'tenant', 'credit_debit', 'book_date', 'category', 'sender_account', IntegrityStatusListFilter,\n ]\n\n raw_id_fields = ('payout', 'remote_payout', 'remote_payment')\n\n readonly_fields = ('payout_link', 'remote_payout_link', 'remote_payment_link',\n 'counter_name', 'counter_account', 'sender_account',\n 'description1', 'description2', 'description3',\n 'description4', 'description5', 'description6',\n 'credit_debit', 'currency', 'book_code', 'book_date', 'interest_date',\n 'amount', 'filler', 'end_to_end_id', 'id_recipient', 'mandate_id',)\n\n fieldsets = (\n (None, {\n 'fields': ('tenant', 'status', 'status_remarks', 'category')\n }),\n ('Data', {\n 'fields': readonly_fields\n }),\n )\n\n import_form = BankTransactionImportForm\n ordering = ('-book_date',)\n\n def queryset(self, request):\n return super(BankTransactionAdmin, self).queryset(request).select_related('payout', 'remote_payout')\n\n def payout_link(self, obj):\n object = obj.payout\n url = reverse('admin:%s_%s_change' % (object._meta.app_label, object._meta.model_name), args=[object.id])\n return \"<a href='%s'>%s</a> (%s)\" % (str(url), object, object.amount)\n payout_link.allow_tags = True\n\n def remote_payout_link(self, obj):\n object = obj.remote_payout\n url = reverse('admin:%s_%s_change' % (object._meta.app_label, object._meta.model_name), args=[object.id])\n return \"<a href='%s'>%s</a> Payout amount %s\" % (str(url), object, object.payout_amount)\n remote_payout_link.allow_tags = True\n\n def remote_payment_link(self, obj):\n object = obj.remote_payment\n url = reverse('admin:%s_%s_change' % (object._meta.app_label, object._meta.model_name), args=[object.id])\n return \"<a href='%s'>%s</a> Amount collected %s\" % (str(url), object, object.amount_collected)\n remote_payment_link.allow_tags = True\n\n def show_actions(self, obj):\n \"\"\"\n Collect the possible actions depending on ``obj.status``.\n \"\"\"\n is_credit = obj.credit_debit == BankTransaction.CreditDebit.credit\n actions = {\n # 'amount_mismatch': '%s' % _('todo'),\n BankTransaction.IntegrityStatus.UnknownTransaction: (\n '<a href=\"%s\">%s</a>' % (\n reverse('admin:banktransaction-unknown', kwargs={'pk': obj.pk}),\n _('manual entry')\n ),\n '<a href=\"%s\">%s</a>' % (\n reverse('admin:banktransaction-add-manualdonation', kwargs={'pk': obj.pk}),\n _('create donation')\n ) if is_credit else None,\n '<a href=\"%s\">%s</a>' % (\n reverse('admin:banktransaction-retry-payout', kwargs={'pk': obj.pk}),\n _('retry payout')\n ) if is_credit else None\n )\n }\n actions = [a for a in (actions.get(obj.status) or []) if a is not None]\n return \" • \".join(actions)\n show_actions.allow_tags = True\n show_actions.short_description = _('actions')\n\n def find_matches(self, request, queryset):\n #\n for transaction in queryset.all():\n match_transaction_with_payout(transaction)\n find_matches.short_description = _(\"Try to match with payouts.\")\n\n def get_urls(self):\n \"\"\"\n Extra urls to save manual entries.\n \"\"\"\n urls = super(BankTransactionAdmin, self).get_urls()\n action_urls = patterns(\n '',\n url(\n r'^(?P<pk>\d+)/unknown_transaction/$',\n self.admin_site.admin_view(UnknownTransactionView.as_view()),\n name='banktransaction-unknown'\n ),\n url(\n r'^(?P<pk>\d+)/unknown_transaction/projectpayout/$',\n self.admin_site.admin_view(CreateProjectPayoutJournalView.as_view()),\n name='banktransaction-add-projectpayoutjournal'\n ),\n url(\n r'^(?P<pk>\d+)/unknown_transaction/organization_payout/$',\n self.admin_site.admin_view(CreateOrganizationPayoutJournalView.as_view()),\n name='banktransaction-add-organizationpayoutjournal'\n ),\n url(\n r'^(?P<pk>\d+)/unknown_transaction/manual_donation/$',\n self.admin_site.admin_view(CreateManualDonationView.as_view()),\n name='banktransaction-add-manualdonation',\n ),\n url(\n r'^(?P<pk>\d+)/unknown_transaction/retry_payout/$',\n self.admin_site.admin_view(RetryPayoutView.as_view()),\n name='banktransaction-retry-payout'\n )\n )\n return action_urls + urls\n\n\nclass DocdataPaymentInline(admin.TabularInline):\n model = RemoteDocdataPayment\n readonly_fields = ['triple_deal_reference', 'merchant_reference', 'payment_type', 'amount_collected',\n 'currency_amount_collected', 'tpci', 'docdata_fee']\n fields = readonly_fields\n\n\nclass DocdataPayoutAdmin(admin.ModelAdmin):\n date_hierarchy = 'start_date'\n\n search_fields = ['payout_reference']\n\n list_display = ['payout_reference', 'week', 'start_date', 'end_date', 'payout_date', 'payout_amount',\n 'payments_total', 'local_payments_total']\n\n readonly_fields = ['payout_reference', 'payout_date', 'payout_amount',\n 'start_date', 'end_date', 'collected_amount',\n 'payments_count', 'payments_total', 'fee_total', 'costs_total', 'vat_costs',\n 'local_payments_total', 'local_payments_count']\n\n fieldsets = (\n (None, {\n 'fields': ('payout_reference', 'payout_date', 'payout_amount',\n 'start_date', 'end_date', 'collected_amount')\n }),\n (_('Calculated from remote payments'), {\n 'fields': ('payments_count', 'payments_total', 'fee_total', 'costs_total', 'vat_costs')\n }),\n (_('Calculated from local order-payments'), {\n 'fields': ('local_payments_count', 'local_payments_total', )\n })\n )\n\n inlines = [DocdataPaymentInline,]\n\n def week(self, obj):\n if obj.start_date:\n return 'Week {0}'.format(obj.start_date.isocalendar()[1])\n return '?'\n\n def local_payments_total(self, obj):\n order_payment_ids = obj.remotedocdatapayment_set.values_list('local_payment__order_payment__id')\n order_payments = OrderPayment.objects.filter(id__in=order_payment_ids)\n order_payments = order_payments.filter(status=StatusDefinition.SETTLED)\n total = order_payments.aggregate(total=Sum('amount'))['total']\n return format(total)\n\n # return obj.remotedocdatapayment_set.filter(local_payment__order_payment__status=StatusDefinition.SETTLED).aggregate(total=Sum('local_payment__order_payment__amount'))['total']\n\n def local_payments_count(self, obj):\n payment_ids = obj.remotedocdatapayment_set.values_list('local_payment_id')\n return len(payment_ids)\n\n def payments_count(self, obj):\n count = obj.remotedocdatapayment_set.count()\n url = '/admin/accounting/remotedocdatapayment/'\n return \"<a href='{0}?remote_payout={1}'>{2} payments</a>\".format(url, obj.id, count)\n payments_count.allow_tags = True\n\n def payments_total(self, obj):\n total = obj.remotedocdatapayment_set.aggregate(total=Sum('amount_collected'))\n return total['total']\n\n def fee_total(self, obj):\n return obj.remotedocdatapayment_set.aggregate(total=Sum('docdata_fee'))['total']\n\n def costs_total(self, obj):\n return obj.remotedocdatapayment_set.aggregate(total=Sum('tpci'))['total']\n\n def vat_costs(self, obj):\n costs = self.costs_total(obj) + self.fee_total(obj)\n return round(Decimal(settings.VAT_RATE) * costs * 100) / 100\n\n\nclass DocdataPaymentAdmin(IncrementalCSVImportMixin, admin.ModelAdmin):\n actions = ('find_matches',)\n\n list_display = [\n 'triple_deal_reference', 'payout_date', 'merchant_reference', 'payment_type',\n 'payment_link', 'matched', 'status', 'status_remarks', 'show_actions'\n ]\n\n list_filter = ['payment_type', DocdataPaymentMatchedListFilter, IntegrityStatusListFilter]\n\n readonly_fields = ['payout_link', 'payment_link', 'merchant_reference', 'triple_deal_reference',\n 'payment_type', 'amount_collected', 'currency_amount_collected', 'tpci',\n 'docdata_fee']\n\n fieldsets = (\n (None, {\n 'fields': ('status', 'status_remarks',)\n }),\n ('Data', {\n 'fields': readonly_fields\n }),\n )\n\n search_fields = ['merchant_reference', 'triple_deal_reference', 'remote_payout__payout_reference']\n\n import_form = DocdataPaymentImportForm\n\n def queryset(self, request):\n return super(DocdataPaymentAdmin, self).queryset(request).select_related(\n 'local_payment', 'remote_payout'\n ).annotate(\n rdp_amount_collected_sum=Sum('local_payment__remotedocdatapayment__amount_collected')\n )\n\n def payout_date(self, obj):\n if obj.remote_payout:\n return obj.remote_payout.payout_date\n return None\n payout_date.admin_order_field = 'remote_payout__payout_date'\n\n def payment_link(self, obj):\n payment = obj.local_payment\n if payment:\n url = reverse('admin:%s_%s_change' % (\n payment._meta.app_label, payment._meta.model_name), args=[payment.id])\n return \"<a href='%s'>%s</a>\" % (str(url), payment)\n return '-'\n payment_link.allow_tags = True\n\n def matched(self, obj):\n return bool(obj.local_payment)\n matched.boolean = True\n\n def payout_link(self, obj):\n object = obj.remote_payout\n url = reverse('admin:%s_%s_change' % (object._meta.app_label, object._meta.model_name), args=[object.id])\n return \"<a href='%s'>%s</a>\" % (str(url), object)\n payout_link.allow_tags = True\n\n def find_matches(self, request, queryset):\n for rdp in queryset.all():\n update_remote_docdata_status(rdp)\n rdp.save()\n bulk_update_remote_docdata_spanning_multiple_weeks(queryset.all())\n find_matches.short_description = _(\"Try to match with backoffice.\")\n\n def show_actions(self, obj):\n \"\"\"\n Collect the possible actions depending on ``obj.status``.\n \"\"\"\n actions = {\n RemoteDocdataPayment.IntegrityStatus.InconsistentChargeback: (\n '<a href=\"%s\">%s</a>' % (\n reverse('admin:banktransaction-unknown', kwargs={'pk': obj.pk}),\n _('mark donations failed')\n ) if not obj.has_problematic_payouts else None,\n '<a href=\"%s\">%s</a>' % (\n reverse('admin:accounting_remotedocdatapayment_take_cut', kwargs={'pk': obj.pk}),\n _('take cut from organization fees')\n ) if obj.has_problematic_payouts else None,\n )\n }\n actions = [a for a in (actions.get(obj.status) or []) if a is not None]\n return \" • \".join(actions)\n show_actions.allow_tags = True\n show_actions.short_description = _('actions')\n\n def get_urls(self):\n \"\"\"\n Extra urls to save manual entries.\n \"\"\"\n urls = super(DocdataPaymentAdmin, self).get_urls()\n action_urls = patterns(\n '',\n url(\n r'^(?P<pk>\d+)/take_cut/$',\n self.admin_site.admin_view(RDPTakeCutView.as_view()),\n name='accounting_remotedocdatapayment_take_cut'\n ),\n )\n return action_urls + urls\n\n\nclass OrderPaymentIntegrityStatuses(DjangoChoices):\n missing_docdata = ChoiceItem('missing_docdata', _('Invalid: Missing docdata payment'))\n missing_remote_docdata = ChoiceItem('missing_remote_docdata', _('Invalid: Missing remote docdata payment'))\n amount_mismatch = ChoiceItem('amount_mismatch', _('Invalid: Amount mismatch ({0} != {1})'))\n valid = ChoiceItem('valid', _('Valid'))\n\n\nclass OrderPaymentAdmin(admin.ModelAdmin):\n date_hierarchy = 'created'\n raw_id_fields = ('user', )\n readonly_fields = ('order_link', 'payment_link', 'remote_payment_link',\n 'authorization_action', 'amount', 'integration_data',\n 'payment_method', 'transaction_fee', 'status', 'created', 'closed')\n fields = ('user',) + readonly_fields\n list_display = ('created', 'user', 'status', 'amount', 'payment_method',\n 'transaction_fee', 'triple_deal_reference', 'matched',\n 'integrity_status', 'show_actions')\n list_filter = ('status', 'created', 'payment_method',\n OrderPaymentMatchedListFilter, OrderPaymentIntegrityListFilter)\n ordering = ('-created',)\n\n def get_queryset(self, request):\n return super(OrderPaymentAdmin, self).get_queryset(request).select_related('payment').annotate(\n rdp_amount_collected=Sum('payment__remotedocdatapayment__amount_collected'),\n n_journals=Count('journals')\n )\n\n def triple_deal_reference(self, obj):\n return ', '.join(obj.payment.remotedocdatapayment_set.values_list('triple_deal_reference', flat=True))\n\n def order_link(self, obj):\n object = obj.order\n url = reverse('admin:{0}_{1}_change'.format(object._meta.app_label, object._meta.model_name), args=[object.id])\n return \"<a href='{0}'>Order: {1}</a>\".format(str(url), object.id)\n order_link.allow_tags = True\n\n def payment_link(self, obj):\n object = obj.payment\n url = reverse('admin:{0}_{1}_change'.format(object._meta.app_label, object._meta.model_name), args=[object.id])\n return \"<a href='{0}'>{1}: {2}</a>\".format(str(url), object.polymorphic_ctype, object.id)\n payment_link.allow_tags = True\n\n def remote_payment_link(self, obj):\n if obj.n_journals:\n url = 'admin:journals_orderpaymentjournal_change'\n return u', '.join('<a href=\"%s\">%s</a>' % (reverse(url, args=[journal.pk]), _('journal'))\n for journal in obj.journals.all())\n if self.matched(obj):\n object = obj.payment.remotedocdatapayment_set.all()[0]\n url = reverse('admin:{0}_{1}_change'.format(object._meta.app_label, object._meta.model_name), args=[object.id])\n return \"<a href='{0}'>Remote Docdata Payment: {1}</a>\".format(str(url), object.id)\n remote_payment_link.allow_tags = True\n\n def matched(self, obj):\n if obj.payment and (obj.payment.remotedocdatapayment_set.exists() or obj.n_journals):\n return True\n return False\n matched.boolean = True\n\n def _get_integrity_status(self, obj):\n if not hasattr(obj, '_integrity_status'):\n if not obj.payment:\n obj._integrity_status = OrderPaymentIntegrityStatuses.missing_docdata\n elif not obj.payment.remotedocdatapayment_set.exists() and not obj.n_journals:\n obj._integrity_status = OrderPaymentIntegrityStatuses.missing_remote_docdata\n # The line below is done via annotate.\n # amount_collected = obj.payment.remotedocdatapayment_set.aggregate(\n # Sum('amount_collected'))['amount_collected__sum']\n elif obj.amount == obj.rdp_amount_collected or obj.n_journals:\n obj._integrity_status = OrderPaymentIntegrityStatuses.valid\n else:\n obj._integrity_status = OrderPaymentIntegrityStatuses.amount_mismatch\n return obj._integrity_status\n\n def integrity_status(self, obj):\n integrity = self._get_integrity_status(obj)\n return OrderPaymentIntegrityStatuses.labels[integrity].format(obj.amount, obj.rdp_amount_collected)\n\n def show_actions(self, obj):\n actions = [] # empty list by default\n integrity = self._get_integrity_status(obj)\n if integrity == OrderPaymentIntegrityStatuses.missing_remote_docdata:\n actions = [\n u'<a href=\"%s\" title=\"%s\">%s</a>' % (\n reverse('admin:accounting_remotedocdatapayment_import'),\n _('Needs to be settled by importing the payments and matching them.'),\n _('Keep')\n ),\n u'<a href=\"%s?%s\">%s</a>' % (\n reverse('admin:journals_orderpaymentjournal_add'),\n urlencode({\n 'amount': obj.amount,\n 'order_payment': obj.pk,\n 'description': 'remote docdata payment not found'\n }),\n _('Manual entry'),\n )\n ]\n return ' • '.join(actions)\n show_actions.allow_tags = True\n\n\nadmin.site.unregister(OrderPayment)\nadmin.site.register(OrderPayment, OrderPaymentAdmin)\n\nadmin.site.register(BankTransaction, BankTransactionAdmin)\nadmin.site.register(RemoteDocdataPayout, DocdataPayoutAdmin)\nadmin.site.register(RemoteDocdataPayment, DocdataPaymentAdmin)\n","sub_path":"bluebottle/accounting/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":19219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"540232352","text":"from __future__ import print_function\nimport numpy as np\nfrom F9utils import F9GameClient\nfrom F9utils import RLAgent\n\n\nclass SimpleAgent(RLAgent):\n def __init__(self,\n client,\n state = None,\n learning_rate=0.15,\n discount_factor=0.9,\n exploration_rate=0.6,\n exploration_decay_rate=0.96):\n\n self.learning_rate = learning_rate\n self.discount_factor = discount_factor\n self.exploration_rate = exploration_rate\n self.exploration_decay_rate = exploration_decay_rate\n self.state = state\n self._num_actions = 8\n # np.random.randint's upper bound is exclusive, so pass _num_actions to cover all 8 actions\n self.action = np.random.randint(0, self._num_actions)\n\n self.client = client\n\n self.__boundaries = [\n # (0, 999), #fuel\n (-50, 50), # vx\n (0, 60), #dist\n (-0.3, 0.3), #angle\n (0, 100), # px,\n (-50, 1) #vy\n ]\n\n self.num_discrete_states = 8\n self._discrete_states = [np.linspace(low, up, self.num_discrete_states) for (low, up) in self.__boundaries]\n self._len_discrete_states = self.num_discrete_states ** len(self._discrete_states)\n self.q = np.zeros((self._len_discrete_states, self._num_actions))\n\n\n def getAction(self, state, reward):\n next_state = self._build_state(state)\n\n enable_exploration = (1 - self.exploration_rate) <= np.random.uniform(0, 1)\n\n next_action = np.random.randint(0, self._num_actions)\n\n if not enable_exploration:\n next_action = np.argmax(self.q[next_state])\n\n return next_action\n\n def provideFeedback(self, state, action, reward, new_state):\n state = self._build_state(state)\n new_state = self._build_state(new_state)\n self.q[state, action] = (1 - self.learning_rate) * self.q[state, action] \\\n + (self.learning_rate * (reward + self.discount_factor\n * self.q[new_state, np.argmax(self.q[new_state])]))\n\n def _build_state(self, observation):\n\n observation = [observation[0]['vx'], observation[0]['dist'], observation[0]['angle'], observation[0]['px'],\n observation[0]['vy']]\n\n states = [np.digitize(val, self._discrete_states[i]) * (len(self._discrete_states) ** i) for i, val in\n enumerate(observation)]\n return sum(states)\n\n\ndef solve():\n # Setup agent\n client = F9GameClient()\n state = client.curState\n ai = 
SimpleAgent(client, state=state)\n # Observe current state\n reward = 0\n while True:\n action = ai.getAction(state, reward)\n actions = client.actions(state)\n act_arr = actions[action]# Decide what to do\n client.doAction(act_arr) # Act\n new_state = client.curState # Observe new state\n reward = client.getReward(new_state) # Observe reward\n ai.provideFeedback(state, action, reward, new_state) # Provide feeback to the agent\n\n agent, platform, system = new_state\n print(\"Agent state %s\\n Platform state %s\\n System state %s\\n Reward %s\\n\" % (agent, platform, system, reward))\n\n if client.isTerminalState(new_state):\n client.reset_game()\n state = client.curState\n else:\n state = new_state\n\nif __name__ == \"__main__\":\n solve()\n\n# -------------------------------------------------- #\n# --------------- you have landed ------------------ #\n# -------------------------------------------------- #\n","sub_path":"F9LanderQClient.py","file_name":"F9LanderQClient.py","file_ext":"py","file_size_in_byte":3661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"497329530","text":"#!/usr/bin/env python3\nimport os\nimport sys\nimport difflib\nimport collections\n\nimport utils.package\nfrom utils.format import Colors, print_rows\nfrom utils.inline import InlineScan\nfrom utils.syft import Syft\n\nDEFAULT_QUALITY_GATE_THRESHOLD = 0.95\nINDENT = \" \"\n\nPACKAGE_QUALITY_GATE = collections.defaultdict(lambda: DEFAULT_QUALITY_GATE_THRESHOLD, **{})\nMETADATA_QUALITY_GATE = collections.defaultdict(lambda: DEFAULT_QUALITY_GATE_THRESHOLD, **{\n # syft is better at detecting package versions in specific cases, leading to a drop in matching metadata\n \"anchore/test_images:java\": 0.61,\n \"jenkins/jenkins:2.249.2-lts-jdk11\": 0.85,\n})\n\n# We additionally fail if an image is above a particular threshold. Why? We expect the lower threshold to be 90%,\n# however additional functionality in grype is still being implemented, so this threshold may not be able to be met.\n# In these cases the IMAGE_QUALITY_GATE is set to a lower value to allow the test to pass for known issues. Once these\n# issues/enhancements are done we want to ensure that the lower threshold is bumped up to catch regression. 
The only way\n# to do this is to select an upper threshold for images with known threshold values, so we have a failure that\n# loudly indicates the lower threshold should be bumped.\nPACKAGE_UPPER_THRESHOLD = collections.defaultdict(lambda: 1, **{})\nMETADATA_UPPER_THRESHOLD = collections.defaultdict(lambda: 1, **{\n # syft is better at detecting package versions in specific cases, leading to a drop in matching metadata\n \"anchore/test_images:java\": 0.65,\n \"jenkins/jenkins:2.249.2-lts-jdk11\": 0.9,\n})\n\n\ndef report(image, analysis):\n if analysis.extra_packages:\n rows = []\n print(\n Colors.bold + \"Syft found extra packages:\",\n Colors.reset,\n \"Syft discovered packages that Inline did not\",\n )\n for package in sorted(list(analysis.extra_packages)):\n rows.append([INDENT, repr(package)])\n print_rows(rows)\n print()\n\n if analysis.missing_packages:\n rows = []\n print(\n Colors.bold + \"Syft missed packages:\",\n Colors.reset,\n \"Inline discovered packages that Syft did not\",\n )\n for package in sorted(list(analysis.missing_packages)):\n rows.append([INDENT, repr(package)])\n print_rows(rows)\n print()\n\n if analysis.missing_metadata:\n print(\n Colors.bold + \"Syft mismatched metadata:\",\n Colors.reset,\n \"the packages between Syft and Inline are the same, the metadata is not\",\n )\n for inline_metadata_pair in sorted(list(analysis.missing_metadata)):\n pkg, metadata = inline_metadata_pair\n if pkg not in analysis.syft_data.metadata[pkg.type]:\n continue\n syft_metadata_item = analysis.syft_data.metadata[pkg.type][pkg]\n\n diffs = difflib.ndiff([repr(syft_metadata_item)], [repr(metadata)])\n\n print(INDENT + \"for: \" + repr(pkg), \"(top is syft, bottom is inline)\")\n print(INDENT+INDENT+(\"\\n\"+INDENT+INDENT).join(list(diffs)))\n\n if not analysis.missing_metadata:\n print(\n INDENT,\n \"There are mismatches, but only due to packages Syft did not find (but inline did).\\n\",\n )\n\n if analysis.similar_missing_packages:\n rows = []\n print(\n Colors.bold + \"Probably pairings of missing/extra packages:\",\n Colors.reset,\n \"to aid in troubleshooting missed/extra packages\",\n )\n for similar_packages in analysis.similar_missing_packages:\n rows.append(\n [\n INDENT,\n repr(similar_packages.pkg),\n \"--->\",\n repr(similar_packages.missed),\n ]\n )\n print_rows(rows)\n print()\n\n show_probable_mismatches = analysis.unmatched_missing_packages and analysis.extra_packages and len(analysis.unmatched_missing_packages) != len(analysis.missing_packages)\n\n if show_probable_mismatches:\n rows = []\n print(\n Colors.bold + \"Probably missed packages:\",\n Colors.reset,\n \"a probable pair was not found\",\n )\n for p in analysis.unmatched_missing_packages:\n rows.append([INDENT, repr(p)])\n print_rows(rows)\n print()\n\n print(Colors.bold + \"Summary:\", Colors.reset, image)\n print(\" Inline Packages : %d\" % len(analysis.inline_data.packages))\n print(\" Syft Packages : %d\" % len(analysis.syft_data.packages))\n print(\n \" (extra) : %d (note: this is ignored by the quality gate!)\"\n % len(analysis.extra_packages)\n )\n print(\" (missing) : %d\" % len(analysis.missing_packages))\n print()\n\n if show_probable_mismatches:\n print(\n \" Probable Package Matches : %d (matches not made, but were probably found by both Inline and Syft)\"\n % len(analysis.similar_missing_packages)\n )\n print(\n \" Probable Packages Matched : %2.3f %% (%d/%d packages)\"\n % (\n analysis.percent_probable_overlapping_packages,\n len(analysis.overlapping_packages)\n + 
len(analysis.similar_missing_packages),\n len(analysis.inline_data.packages),\n )\n )\n print(\n \" Probable Packages Missing : %d \"\n % len(analysis.unmatched_missing_packages)\n )\n print()\n print(\n \" Baseline Packages Matched : %2.3f %% (%d/%d packages)\"\n % (\n analysis.percent_overlapping_packages,\n len(analysis.overlapping_packages),\n len(analysis.inline_data.packages),\n )\n )\n print(\n \" Baseline Metadata Matched : %2.3f %% (%d/%d metadata)\"\n % (\n analysis.percent_overlapping_metadata,\n len(analysis.overlapping_metadata),\n len(analysis.inline_metadata),\n )\n )\n\n\ndef enforce_quality_gate(title, actual_value, lower_gate_value, upper_gate_value):\n\n if actual_value < lower_gate_value:\n print(\n Colors.bold\n + \" %s Quality Gate:\\t\" % title\n + Colors.FG.red\n + \"FAIL (is not >= %d %%)\" % lower_gate_value,\n Colors.reset,\n )\n return False\n elif actual_value > upper_gate_value:\n print(\n Colors.bold\n + \" %s Quality Gate:\\t\" % title\n + Colors.FG.orange\n + \"FAIL (lower threshold is artificially low and should be updated)\",\n Colors.reset,\n )\n return False\n\n print(\n Colors.bold\n + \" %s Quality Gate:\\t\" % title\n + Colors.FG.green\n + \"Pass (>= %d %%)\" % lower_gate_value,\n Colors.reset,\n )\n\n return True\n\ndef main(image):\n cwd = os.path.dirname(os.path.abspath(__file__))\n\n # parse the inline-scan and syft reports on disk\n inline = InlineScan(image=image, report_dir=os.path.join(cwd, \"inline-reports\"))\n syft = Syft(image=image, report_dir=os.path.join(cwd, \"syft-reports\"))\n\n # analyze the raw data to generate all derivative data for the report and quality gate\n analysis = utils.package.Analysis(\n syft_data=syft.packages(), inline_data=inline.packages()\n )\n\n # show some useful report data for debugging / warm fuzzies\n report(image, analysis)\n\n # enforce a quality gate based on the comparison of package values and metadata values\n success = True\n success &= enforce_quality_gate(\n title=\"Package\",\n actual_value=analysis.percent_overlapping_packages,\n lower_gate_value=PACKAGE_QUALITY_GATE[image] * 100,\n upper_gate_value=PACKAGE_UPPER_THRESHOLD[image] * 100\n )\n success &= enforce_quality_gate(\n title=\"Metadata\",\n actual_value=analysis.percent_overlapping_metadata,\n lower_gate_value=METADATA_QUALITY_GATE[image] * 100,\n upper_gate_value=METADATA_UPPER_THRESHOLD[image] * 100\n )\n\n if not success:\n return 1\n return 0\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n sys.exit(\"provide an image\")\n\n rc = main(sys.argv[1])\n sys.exit(rc)\n","sub_path":"test/inline-compare/compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":8332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"237076121","text":"import bot_functions\n\n#This program is designed to run from the current company number up to the latest company formed\n\n# The current company number is stored in a file\n# This command retrieves the value from a file\n# And assigns it the name 'num'\nnum_file = 'cycle_num.txt'\nnum = bot_functions.get_num(num_file)\n\n# This command search for how many companies have a particular company number\n# The result should onlt ever be 0 or 1\n# The result is assigned to the variable 'count'\ncount = bot_functions.num_count(num)\n\n#By uncommenting the below you can try to diagnose the cause of errors\n#print (count)\n\nprint ('The starting company number is ' + num + '\\n')\nprint ('This company number occurs in the database ' + count + ' 
time(s)')\n\n#Use this if you want to cycle through companies:\ni = 1\nwhile count == '1':\n#\ttry:\n\tbot_functions.make_json(num)\n\tbot_functions.json_to_address(num)\n\tbot_functions.address_to_geocode(num)\n\tjson_data = bot_functions.read_json(num)\n\tbot_functions.json_to_mysql(json_data)\n\tbot_functions.json_to_sql(json_data)\n\tbot_functions.increment(num_file)\n\n\ti = i + 1\n\tnum = bot_functions.get_num(num_file)\n\tcount = bot_functions.num_count(num)\n\tif count == '0':\n\t\tbot_functions.increment(num_file)\n\t\tnum = bot_functions.get_num(num_file)\n\t\tcount = bot_functions.num_count(num)\n\t\tif count == '0':\n\t\t\tbot_functions.deincrement(num_file)\n\t\t\tnum = bot_functions.get_num(num_file)\n\t\t\tcount = bot_functions.num_count(num)\n\tprint ('The current number is ' + num)\n\tprint ('The count is :' + count + '\\n')\n\tprint ('The search has been performed ' + str(i) + ' times.\\n')\n#\texcept:\n#\t\tprint ('There was an error on ' + num + '.')\n#\t\tbot_functions.increment(num_file)\n#\t\tcontinue\n","sub_path":"crobot/no_misses_bot.py","file_name":"no_misses_bot.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"475621827","text":"# {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}: print the subsets whose sum is 10\n# count = 0\nN = 10\nA = [0 for _ in range(N)]\n# data = [-1, 3, -9, 6, 7, -6, 1, 5, 4, -2]\ndata = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\ndef printSet(n):\n    sum = 0\n    answer = []\n    # global count\n    # count += 1\n    # print('%d : ' % (count), end='') # print the number of generated subsets\n    for i in range(n): # print the elements of each subset\n        if A[i] == 1: # A[i] == 1 means the element is included, so print it\n            # print('%d ' % data[i], end='')\n            sum += data[i]\n            answer.append(data[i])\n    if sum == 10:\n        # print(*answer)\n        for i in range(n):\n            if A[i] == 1:\n                print('%d ' % data[i], end='')\n        print()\n    # print()\n\ndef powerset(n, k): # n: number of elements, k: current depth\n    if n == k: # Basis Part\n        printSet(n)\n    else: # Inductive Part\n        A[k] = 1 # include element k\n        powerset(n, k+1) # decide whether to include the next element\n        A[k] = 0 # exclude element k\n        powerset(n, k+1) # decide whether to include the next element\n\npowerset(N, 0)","sub_path":"algorithm/day21/연습3_부분집합.py","file_name":"연습3_부분집합.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"417596771","text":"\"\"\"The type file for the collection Lab.\n\nmoved out of __init.py__ in order to have lab specific acl\nthat allows a lab member to edit their lab info at any time\n\"\"\"\nfrom pyramid.security import (\n    Allow,\n    Deny,\n    Everyone,\n)\n\nfrom snovault import (\n    collection,\n    load_schema,\n)\nfrom .base import (\n    Item,\n)\n\n\n\nONLY_ADMIN_VIEW = [\n    (Allow, 'group.admin', ['view', 'edit']),\n    (Allow, 'group.read-only-admin', ['view']),\n    (Allow, 'remoteuser.INDEXER', ['view']),\n    (Allow, 'remoteuser.EMBED', ['view']),\n    (Deny, Everyone, ['view', 'edit'])\n]\n\nALLOW_EVERYONE_VIEW = [\n    (Allow, Everyone, 'view'),\n]\n\nALLOW_EVERYONE_VIEW_AND_SUBMITTER_EDIT = [\n    (Allow, Everyone, 'view'),\n    (Allow, 'role.lab_submitter', 'edit'),\n] + ONLY_ADMIN_VIEW\n\n\n@collection(\n    name='labs',\n    unique_key='lab:name',\n    properties={\n        'title': 'Labs',\n        'description': 'Listing of 4D Nucleome labs',\n    })\nclass Lab(Item):\n    \"\"\"Lab class.\"\"\"\n\n    item_type = 'lab'\n    schema = load_schema('encoded:schemas/lab.json')\n    name_key = 'name'\n    embedded = ['awards']\n\n    STATUS_ACL = {\n        'current': ALLOW_EVERYONE_VIEW_AND_SUBMITTER_EDIT,\n        'deleted': ONLY_ADMIN_VIEW,\n        'revoked': ALLOW_EVERYONE_VIEW,\n        'inactive': ALLOW_EVERYONE_VIEW,\n    }\n\n    def __init__(self, registry, models):\n        super().__init__(registry, models)\n        if hasattr(self, 'STATUS_ACL'):\n            self.STATUS_ACL.update(self.__class__.STATUS_ACL)\n        else:\n            self.STATUS_ACL = self.__class__.STATUS_ACL\n\n    def __ac_local_roles__(self):\n        \"\"\"this creates roles that the lab item needs so it can be edited & viewed\"\"\"\n        roles = {}\n        lab_submitters = 'submits_for.%s' % self.uuid\n        roles[lab_submitters] = 'role.lab_submitter'\n        lab_member = 'lab.%s' % self.uuid\n        roles[lab_member] = 'role.lab_member'\n        return roles\n","sub_path":"src/encoded/types/lab.py","file_name":"lab.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"190903624","text":"#!/usr/bin/env python3\nimport os\nimport time\nfrom pathlib import Path\n\nimport log4p\nfrom dotenv import load_dotenv\n\nfrom definitions import ROOT_DIR, SYN_ENV\nfrom syn.helpers.system import check_same_python_module_already_running\nfrom syn.helpers.model.ModelHelper import get_input_params\nfrom syn.model.build.treelstm.trainings.tree_lstm import TreeLstmDuplicateTrain\n\n\n########################################################################################################################\n#\n# $ nohup python3 -u -m syn.model.build.codebooks.creacion_de_tareas.TasksForAllEndpoints --c netBeans >> output.log &\n#\n########################################################################################################################\n\n\ndef main():\n    # Stores the execution start time to calculate the time it takes for the module to execute.\n    initial_time = time.time()\n    # Define the logger to be used.\n    logger = log4p.GetLogger(__name__)\n    log = logger.logger\n\n    log.info(f\"EXECUTION START\")\n    env_path = Path(ROOT_DIR) / 'config' / (SYN_ENV + '.env')\n    load_dotenv(dotenv_path=env_path)\n\n    # Check if there is a running process that contains the name of this module.\n    check_same_python_module_already_running(os.path.split(__file__))\n\n    # Initialize the variables that will store the input arguments.\n    input_params = get_input_params()\n\n    dup = TreeLstmDuplicateTrain(\n        corpus='bugzilla',\n        collection='clear',\n        attention=False,\n        attention_size=10,\n        glove_size=100,\n        hidden_size=100,\n        max_input=200,\n        batch_size=1,\n        optimizer='ADAM',\n        learning_rate=0.001,\n        update_embeddings=True,\n        patience=5\n    ).load_or_run()\n\n    output_dir = 'resultados/dump'\n    # try:\n    #     dup.delete_dynet(output_dir=output_dir)\n    #     pass\n    # except OSError:\n    #     pass\n    # # dup.run()\n    # # classifier, result = dup.load_or_run()\n    # classifier, results = dup.load_or_run_dynet()\n    #\n    # print({x: results[x] for x in [\"Accuracy\", \"Precision\", \"Recall\", \"F1\"]})\n\n    # import matplotlib.pyplot as plt\n    #\n    # plt.plot(*results[\"roc\"][:2])\n    # plt.plot([0, 1], [0, 1], \"k--\")\n    # plt.title(\"ROC\")\n    # plt.show()\n    #\n    # results[\"confusion\"]\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"syn/model/build/treelstm/trainings/TasksForAllEndpoints.py","file_name":"TasksForAllEndpoints.py","file_ext":"py","file_size_in_byte":2335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"342400816","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets, uic\nfrom linuxnano.strings import col, typ\nfrom linuxnano.views.widgets.scientific_spin import 
ScientificDoubleSpinBox\n\nmanual_device_view_base, manual_device_view_form = uic.loadUiType(\"linuxnano/views/DeviceManualView.ui\")\n\nclass DeviceManualView(manual_device_view_base, manual_device_view_form):\n def __init__(self, parent=None):\n super(manual_device_view_base, self).__init__()\n self.setupUi(self)\n\n self._model = None\n self._mapper = QtWidgets.QDataWidgetMapper()\n\n def setSelection(self, index):\n self._sub_mappers = []\n\n #Clear out the layout with the Hal Nodes\n for i in reversed(range(self.ui_wids.count())):\n wid = self.ui_wids.takeAt(i).widget()\n if wid is not None:\n wid.deleteLater()\n\n if hasattr(index.model(), 'mapToSource'):\n index = index.model().mapToSource(index)\n\n node = index.internalPointer()\n if node is not None:\n typeInfo = node.typeInfo()\n\n parent_index = index.parent()\n self._mapper.setRootIndex(parent_index)\n self._mapper.setCurrentModelIndex(index)\n\n #Look for any BoolVarNode or FloatVarNodes\n try:\n for row in range(self._model.rowCount(index)):\n child_index = index.child(row,0)\n wid = None\n\n node = child_index.internalPointer()\n\n #A user never directly sets IO\n if node.typeInfo() == typ.D_IN_NODE:\n wid = ManualBoolView()\n\n elif node.typeInfo() in [typ.A_IN_NODE, typ.A_OUT_NODE]:\n wid = ManualFloatView()\n\n elif node.typeInfo() == typ.D_OUT_NODE:\n wid = ManualBoolView() if node.viewOnly else ManualBoolSet()\n\n elif node.typeInfo() == typ.BOOL_VAR_NODE:\n wid = ManualBoolView() if node.viewOnly else ManualBoolSet()\n\n elif node.typeInfo() == typ.FLOAT_VAR_NODE:\n wid = ManualFloatView() if node.viewOnly else ManualFloatSet()\n\n if wid is not None:\n wid.setModel(child_index.model())\n wid.setRootIndex(index)\n wid.setCurrentModelIndex(child_index)\n self.ui_wids.addWidget(wid)\n except:\n pass\n\n self.ui_wids.addStretch(1)\n\n def setModel(self, model):\n if hasattr(model, 'mapToSource'):\n model = model.sourceModel()\n self._model = model\n\n self._mapper.setModel(model)\n self._mapper.addMapping(self.ui_name, col.NAME, bytes(\"text\",'ascii'))\n self._mapper.addMapping(self.ui_description, col.DESCRIPTION, bytes(\"text\",'ascii'))\n self._mapper.addMapping(self.ui_status, col.STATUS, bytes(\"text\",'ascii'))\n\n def model(self):\n return self._model\n\n\n\nclass ManualBoolView(QtWidgets.QWidget):\n #Format: \"Name : value\"\n def __init__(self):\n super().__init__()\n self.mapper = QtWidgets.QDataWidgetMapper()\n hbox = QtWidgets.QHBoxLayout()\n self.setLayout(hbox)\n\n self._val = False\n self.ui_name = QtWidgets.QLabel('unknown')\n self.ui_val = QtWidgets.QLabel('?')\n self._off_name = \"\"\n self._on_name = \"\"\n\n hbox.addWidget(self.ui_name)\n hbox.addWidget(QtWidgets.QLabel(': '))\n hbox.addWidget(self.ui_val)\n hbox.addStretch(1)\n\n def setRootIndex(self, index):\n self.mapper.setRootIndex(index)\n\n def setCurrentModelIndex(self, index):\n self.mapper.setCurrentModelIndex(index)\n node = index.internalPointer()\n\n #These aren't changing often so they can just be set\n self.ui_name.setText(str(node.name))\n self._off_name = node.offName\n self._on_name = node.onName\n\n txt = self._on_name if node.value() else self._off_name\n self.ui_val.setText(txt)\n\n\n @QtCore.pyqtProperty(int)\n def value(self):\n return self._val\n\n @value.setter\n def value(self, value):\n self._val = value\n txt = self._on_name if value else self._off_name\n self.ui_val.setText(txt)\n\n def setModel(self, model):\n if hasattr(model, 'sourceModel'):model = model.sourceModel()\n self.mapper.setModel(model)\n 
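# QDataWidgetMapper.addMapping(widget, section, propertyName) binds a widget\n        # property to a model column; here the 'value' pyqtProperty declared above is\n        # kept in sync with col.VALUE, so model changes redraw the on/off label.\n        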
self.mapper.addMapping(self, col.VALUE, bytes('value','ascii'))\n\n\nclass ManualBoolSet(QtWidgets.QWidget):\n #Format \"Name : bnt_off btn_on\", buttons get grayed out if enable_manual isn't true\n def __init__(self):\n super().__init__()\n self.mapper_1 = QtWidgets.QDataWidgetMapper()\n self.mapper_2 = QtWidgets.QDataWidgetMapper()\n\n hbox = QtWidgets.QHBoxLayout(self)\n self.setLayout(hbox)\n\n self.ui_name = QtWidgets.QLabel('unknown')\n\n self.btn_group = QtWidgets.QButtonGroup()\n self.btn_group.setExclusive(True)\n self.btn_group.buttonClicked.connect(self.onClicked)\n\n self.ui_btn1 = QtWidgets.QPushButton('?', self)\n self.ui_btn2 = QtWidgets.QPushButton('?', self)\n self.ui_btn1.setCheckable(True)\n self.ui_btn2.setCheckable(True)\n\n self.btn_group.addButton(self.ui_btn1, 0)\n self.btn_group.addButton(self.ui_btn2, 1)\n\n hbox.addWidget(self.ui_name)\n hbox.addWidget(QtWidgets.QLabel(': '))\n hbox.addWidget(self.ui_btn1)\n hbox.addWidget(self.ui_btn2)\n hbox.addStretch(1)\n\n self.via_this_button = False\n self.btn_group.buttonClicked.connect(self.mapper_1.submit)\n self.mapper_1.currentIndexChanged.connect(self.mapper_2.setCurrentIndex)\n\n\n def setRootIndex(self, index):\n self.mapper_1.setRootIndex(index)\n self.mapper_2.setRootIndex(index)\n\n def setCurrentModelIndex(self, index):\n self.mapper_1.setCurrentModelIndex(index)\n self.mapper_2.setCurrentModelIndex(index)\n\n node = index.internalPointer()\n\n #These aren't changing often so they can just be set\n self.ui_name.setText(str(node.name))\n self.ui_btn1.setText(str(node.offName))\n self.ui_btn2.setText(str(node.onName))\n\n def onClicked(self, btn):\n self.via_this_button = True\n self.value = self.btn_group.checkedId()\n\n @QtCore.pyqtProperty(int)\n def value(self):\n return self.btn_group.checkedId()\n\n @value.setter\n def value(self, value):\n if not self.via_this_button:\n if value:\n self.ui_btn2.setChecked(True)\n else:\n self.ui_btn1.setChecked(True)\n\n self.via_this_button = False\n\n @QtCore.pyqtProperty(int)\n def enableManual(self):\n return self.ui_btn1.isEnabled()\n\n @enableManual.setter\n def enableManual(self, value):\n if value:\n self.ui_btn1.setEnabled(True)\n self.ui_btn2.setEnabled(True)\n else:\n self.ui_btn1.setEnabled(False)\n self.ui_btn2.setEnabled(False)\n\n def setModel(self, model):\n if hasattr(model, 'sourceModel'):model = model.sourceModel()\n self.mapper_1.setModel(model)\n self.mapper_2.setModel(model)\n\n self.mapper_1.addMapping(self, col.VALUE, bytes('value','ascii'))\n self.mapper_2.addMapping(self, col.ENABLE_MANUAL, bytes('enableManual','ascii'))\n\n\nclass ManualFloatView(QtWidgets.QWidget):\n #Format: \"Name : value units\"\n def __init__(self):\n super().__init__()\n self.mapper = QtWidgets.QDataWidgetMapper()\n\n hbox = QtWidgets.QHBoxLayout()\n self.setLayout(hbox)\n\n self._val = 0\n self._display_digits = 3\n self._display_scientific = False\n\n self.ui_name = QtWidgets.QLabel('unknown')\n self.ui_val = QtWidgets.QLabel('?')\n self.ui_units = QtWidgets.QLabel('')\n\n hbox.addWidget(self.ui_name)\n hbox.addWidget(QtWidgets.QLabel(': '))\n hbox.addWidget(self.ui_val)\n hbox.addWidget(self.ui_units)\n hbox.addStretch(1)\n\n def setRootIndex(self, index):\n self.mapper.setRootIndex(index)\n\n def setCurrentModelIndex(self, index):\n self.mapper.setCurrentModelIndex(index)\n node = index.internalPointer()\n\n #These aren't changing often so they can just be set\n self.ui_name.setText(str(node.name))\n self.ui_units.setText(str(node.units))\n self._display_digits = 
index.internalPointer().displayDigits\n        self._display_scientific = index.internalPointer().displayScientific\n\n    @QtCore.pyqtProperty(float)\n    def val(self):\n        return self._val\n\n    @val.setter\n    def val(self, value):\n        try:\n            self._val = value\n\n            if self._display_scientific:\n                txt = \"%.*e\"%(self._display_digits, self._val)\n            else:\n                txt = \"%0.*f\"%(self._display_digits, self._val)\n\n            self.ui_val.setText(txt)\n\n        except:\n            self.ui_val.setText('')\n\n    def setModel(self, model):\n        if hasattr(model, 'sourceModel'):model = model.sourceModel()\n        self.mapper.setModel(model)\n        self.mapper.addMapping(self, col.VALUE, bytes('val','ascii')) #Only one mapping of 'self' allowed per mapper\n\n\n#TODO I think I need to add digits and scientific logic to this\nclass ManualFloatSet(QtWidgets.QWidget):\n    #Format: \"Name : value units\"\n    def __init__(self):\n        super().__init__()\n        self.mapper = QtWidgets.QDataWidgetMapper()\n\n        hbox = QtWidgets.QHBoxLayout()\n        self.setLayout(hbox)\n\n        self.ui_name = QtWidgets.QLabel('unknown')\n        self.ui_value = ScientificDoubleSpinBox()\n        self.ui_units = QtWidgets.QLabel('units')\n        hbox.addWidget(self.ui_name)\n        hbox.addWidget(QtWidgets.QLabel(': '))\n        hbox.addWidget(self.ui_value)\n        hbox.addWidget(self.ui_units)\n        hbox.addStretch(1)\n\n    def setRootIndex(self, index):\n        self.mapper.setRootIndex(index)\n\n    def setCurrentModelIndex(self, index):\n        self.mapper.setCurrentModelIndex(index)\n\n        #These aren't changing often so they can just be set\n        node = index.internalPointer()\n        self.ui_name.setText(str(node.name))\n        self.ui_units.setText(str(node.units))\n\n    @QtCore.pyqtProperty(int)\n    def enableManual(self):\n        return self.ui_value.isEnabled()\n\n    @enableManual.setter\n    def enableManual(self, value):\n        if value:\n            self.ui_value.setEnabled(True)\n        else:\n            self.ui_value.setEnabled(False)\n\n    def setModel(self, model):\n        if hasattr(model, 'sourceModel'):model = model.sourceModel()\n        self.mapper.setModel(model)\n\n        self.mapper.addMapping(self.ui_value, col.VALUE)# bytes(\"text\",'ascii'))\n        self.mapper.addMapping(self, col.ENABLE_MANUAL, bytes('enableManual','ascii'))\n","sub_path":"linuxnano/views/widgets/device_manual_view.py","file_name":"device_manual_view.py","file_ext":"py","file_size_in_byte":10902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"581418982","text":"import math\nimport time\n\n#define the factorial function\ndef factorial(testnumber):\n    if testnumber in [0, 1]:\n        factorialValue = 1\n    elif testnumber < 0:\n        testnumber = int(input(\"Enter a non-negative number: \"))\n        print(\"New test number: %d\" % testnumber)\n        factorialValue = factorial(testnumber)\n        return factorialValue\n    else:\n        factorialValue = testnumber * factorial(testnumber-1)\n    return factorialValue\n\n\n# call functions\nstart_time = time.time()\ntestfigure = 3\nprint(\"Factorial is: %d\" % factorial(testfigure))\nprint(\"Time: %f seconds\" % (time.time() - start_time))\nprint(\"Initial test number: %s\" % testfigure)","sub_path":"algorithms/factorial.py","file_name":"factorial.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"349474400","text":"import pyautogui\r\nimport time\r\nimport os\r\nimport random\r\n\r\n#** screenshots that indicate the episode has ended\r\n#** they need to be updated constantly\r\n#** a difference of 1 pixel will keep the program\r\n#** from working correctly\r\n\r\n#Edit these for the name of what is being watched\r\n\r\ni = 1\r\nepi = 1\r\ntemporada = 1\r\nnome = \"Nome_da_Serie\" #use underscores to separate the name\r\ndissname = nome+'_'+'temporada_'+str(temporada)+'_ep_' #name under which the file will be saved\r\n\r\n# Start of the screenshots\r\n\r\nwhile(True):\r\n    \r\n    print(\"image:\",i) # shows in the console how many screenshots have been taken\r\n    time.sleep(2) # waits x seconds until the next screenshot\r\n    pic = pyautogui.screenshot() # calls the function that captures the screen\r\n    pic.save('episodio'+str(epi)+'/'+dissname+str(epi)+'_'+str(i)+'.png') #saves the screenshot in the format -> episodio1/nome_temporada_i.png \r\n    i+=1 # increments the counter\r\n    \r\n    # Checking whether the episode has ended\r\n    # Only checks whether the episode has ended after 300 screenshots (more than half the episode)\r\n    if(i>300):\r\n        print('searching')\r\n        localizar = pyautogui.locateOnScreen('1.png')#,region=(1000,400,400,400)) #locates the reference image in the indicated region\r\n        # When the image is not found, it returns 'None'\r\n        print(type(localizar))\r\n        if(localizar is not None): # So we know the episode has ended when the 'localizar' variable is not 'None'\r\n            x_aleatorio = random.randint(1,180) #generates a random x value within a specific range\r\n            y_aleatorio = random.randint(1,150) #generates a random y value within a specific range\r\n            x_variando = 1000+x_aleatorio # added to the base value where we know the desired button will be\r\n            y_variando = 470+y_aleatorio # added to the base value where we know the desired button will be\r\n            local_click = (x_variando,y_variando) #clicks this region of the screen, which is where the next-episode button is\r\n            pyautogui.click(local_click) #clicks the button to watch the next episode\r\n            os.mkdir('episodio'+str((epi+1))) #creates a directory to store the new episode's images\r\n            i = 1 #resets the screenshot counter\r\n            epi+=1 #updates the episode being watched\r\n\r\n    # We can add a stop condition so the program does not run forever\r\n    else:\r\n        pass\r\n        #if(epi>10):\r\n            #break\r\n\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"74457989","text":"def count_arithmetic(totalMinutes):\n    h = 12\n    m1 = 0\n    m2 = 0\n\n    def incrementTime():\n        \"\"\" Increments current time by one minute. \"\"\"\n        nonlocal h\n        nonlocal m1\n        nonlocal m2\n        m2 += 1\n        if m2 > 9:\n            m2 = 0\n            m1 += 1\n            if m1 > 5:\n                m1 = 0\n                h += 1\n                if h > 12:\n                    h = 1\n\n    def is_arithmetic():\n        \"\"\" Checks if the current time is an arithmetic sequence. 
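For example, 2:46 reads as digits (2, 4, 6) with step 2 and 12:34 as (1, 2, 3, 4) with step 1, so both count; 12:35 does not. 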
\"\"\"\n nonlocal h\n nonlocal m1\n nonlocal m2\n if h > 9:\n step = (h%10) - (h//10)\n if m1 - (h%10) != step:\n return False\n else:\n step = m1 - h\n \n if m2 - m1 != step:\n return False\n\n return True\n \n if totalMinutes <= 720:\n # Iterate through all the times, and count the arithmetic sequences\n i = 0\n counter = 0\n while i < totalMinutes:\n if is_arithmetic():\n counter += 1\n incrementTime()\n i += 1\n # Check ending time\n if is_arithmetic():\n counter += 1\n else:\n # There are 720 in a full cycle (12:00 - 12:00), so we can call count_arithmetic once to determine the arithmetic sequences in one cycle, and multiply by the number of cycles\n # Since function checks ending time, pass 719 instead of 720 to prevent double counting 12:00, even though 12:00 is not an arithmetic sequence\n counter = count_arithmetic(719) * (totalMinutes // 720);\n counter += count_arithmetic(totalMinutes % 720)\n\n return counter\n\ni = int(input())\nprint(count_arithmetic(i))\n","sub_path":"favourite_times.py","file_name":"favourite_times.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"54244201","text":"import time\nimport ray\nfrom matplotlib import pyplot as plt\nfrom openpyxl import Workbook\nfrom openpyxl import load_workbook\n\n@ray.remote\ndef stress_function(num):\n return sum([i*j*k for i in range(num) for j in range(i) for k in range(j)])\n\nif __name__ == \"__main__\":\n ray.init(address = \"auto\")\n\n inp = int(input(\"Enter a number: \"))\n czas = []\n try:\n wb = Workbook()\n except:\n wb = load_workbook(\"excel_data.xlsx\")\n sheet = wb.active\n\n for i in range(inp):\n start = time.time()\n futures = [stress_function.remote(i)]\n ray.get(futures)\n czas.append(time.time() - start)\n\n for i in range(inp):\n sheet.cell(row=i + 1, column=1).value = i + 1\n sheet.cell(row=i + 1, column=2).value = czas[i]\n\n wb.save(\"excel_data.xlsx\")\n\n\n dev_x = [i for i in range(inp)]\n dev_y = [czas[i] for i in range(inp)]\n\n plt.xlabel(\"Number of iterations\")\n plt.ylabel(\"Time in seconds\")\n plt.title(\"Time of compilation by number of iterations\")\n\n plt.plot(dev_x, dev_y)\n\n plt.legend([\"Stress Function\"])\n\n plt.show(block=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"210759581","text":"from core.config import Config\nfrom core.messageCache import MessageCache\nfrom core.databaseConnector import DatabaseConnector\nimport pickle\nimport json\nimport asyncio\n\n\nclass SensorSubmitter(MessageCache):\n\n def __init__(self, config: Config, database_connector: DatabaseConnector) -> None:\n super().__init__(config.INTERNAL_MESSAGE_CACHE_MAX_QUEUE_LENGTH)\n self.database_connector = database_connector\n self.__config = config\n self.__insert_batch = []\n self.__insert_loop: asyncio.Task = asyncio.get_event_loop().create_task(self.insert_loop())\n\n async def publish(self, message: str) -> None:\n return await super().publish(pickle.dumps(message))\n\n async def process_message(self, message: bytes) -> None:\n message_deserialized = json.loads(pickle.loads(message))\n\n self.__insert_batch.extend(message_deserialized)\n # Ensure the insert Loop is restart on errors\n if self.__insert_loop.done():\n self.__insert_loop = asyncio.get_event_loop().create_task(self.insert_loop())\n\n async def insert_loop(self):\n while True:\n await 
asyncio.sleep(2)\n if len(self.__insert_batch) > 0:\n insert_batch = self.__insert_batch.copy()\n self.__insert_batch = []\n try:\n await self.database_connector.create_new_sensor_entry(json.dumps(insert_batch))\n except:\n # Only at least once; Not exactly once\n print('Database bulk insert issue. Retrying one by one..')\n while len(insert_batch) > 0:\n try:\n await self.create_in_database(json.dumps(insert_batch.pop()))\n except:\n pass\n\n async def create_in_database(self, message: str):\n try:\n return await self.database_connector.create_new_sensor_entry(message)\n except:\n # Only at least once; Not exactly once\n print('Database issue. Retrying..')\n await self.publish(pickle.dumps(message))\n","sub_path":"cloud/src/handler/sensorSubmitter.py","file_name":"sensorSubmitter.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"316303502","text":"\"\"\"Class to handle chapter processing for books.\"\"\"\nfrom __future__ import print_function\n\nimport os\nimport logging\nimport hashlib\nimport subprocess\nimport inspect\nimport copy\n\nimport lxml\nfrom lxml import etree\n\ntry:\n from termcolor import colored\nexcept ImportError:\n logging.error(\"Please install termcolor:\\n sudo pip install termcolor\")\n\nfrom XmlValidator import XmlValidator\nfrom XmlValidator import XmlValidationError\nfrom . import htmlutils\nimport imageutils\nfrom utils import mkdir_p, copy_if_newer, TOCBuilder, TocElement, add_unique_ids\n\n\nspecpath = os.path.join(os.path.dirname(inspect.getfile(XmlValidator)),\n 'spec.xml')\n\nDEBUG = False\n\n\ndef print_debug_msg(msg):\n \"\"\"Print a debug message if DEBUG is True.\"\"\"\n if DEBUG:\n print(colored(\"DEBUG: {msg}\".format(msg=msg), \"yellow\"))\n\n\nclass chapter(object):\n \"\"\"Class to represent a single chapter.\"\"\"\n\n def __init__(self, cnxmlplusfile, **kwargs):\n \"\"\"Cnxmlplusfile is the path of the file.\"\"\"\n # set some default attributes.\n self.file = cnxmlplusfile\n self.chapter_number = None\n self.title = None\n self.hash = None\n self.has_changed = True\n self.valid = None\n self.conversion_success = {'tex': False,\n 'html': False,\n 'xhtml': False,\n 'mobile': False,\n 'html5': False}\n\n # set attributes from keyword arguments\n # This can be used to set precomputed values e.g. read from a cache\n for key, value in kwargs.items():\n setattr(self, key, value)\n\n if not self.render_problems:\n # if this has not been instantiated yet create an empty\n # dict to keep track of image rendering success for every output\n # format. Make all True to force image generation on first go.\n self.render_problems = {'tex': True,\n 'html': True,\n 'xhtml': True,\n 'mobile': True,\n 'html5': True}\n # Parse the xml\n self.parse_cnxmlplus()\n\n def calculate_hash(self, content):\n \"\"\"Calculate the md5 hash of the file content and returns it.\"\"\"\n m = hashlib.md5()\n m.update(content)\n\n return m.hexdigest()\n\n def parse_cnxmlplus(self):\n \"\"\"Parse the xml file and save some information.\"\"\"\n with open(self.file, 'r') as f_in:\n content = f_in.read()\n\n if (self.hash is None) or (self.valid is False):\n self.hash = self.calculate_hash(content)\n # if the hash is None, it has not been passed from Book class and\n # hence didn't exist in the cache. 
Need to validate this file\n self.validate()\n else:\n # If self.hash has been set and it differs from current hash, then\n # re-validate\n current_hash = self.calculate_hash(content)\n if self.hash != current_hash:\n self.validate()\n self.hash = current_hash\n self.has_changed = True\n else:\n # file is valid, no validation required.\n self.valid = True\n self.hash = current_hash\n self.has_changed = False\n\n try:\n xml = etree.XML(content)\n except lxml.etree.XMLSyntaxError:\n logging.error(\n colored(\"{file} is not valid XML!\".format(\n file=self.file), 'red'))\n return None\n\n # save the number\n try:\n self.chapter_number = int(self.file[0:self.file.index('-')])\n except:\n self.chapter_number = 'N/A'\n logging.warn(\n \"{file} doesn't follow naming convention \\\n CC-title-here.cnxmlplus\".format(file=self.file))\n\n # The title should be in in an element called \n # inside a <section type=\"chapter\"> and there should only be one in the\n # file. For now.\n chapters = xml.findall('.//section[@type=\"chapter\"]')\n if len(chapters) > 1:\n logging.error(\n \"{filename} contains more than 1 chapter!\".format(\n filename=self.file))\n elif len(chapters) < 1:\n logging.error(\n \"{filename} contains no chapters!\".format(filename=self.file))\n else:\n self.title = chapters[0].find('.//title').text\n\n def info(self):\n \"\"\"Return a formatted string with all the details of the chapter.\"\"\"\n info = '{ch}'.format(ch=self.chapter_number)\n info += ' ' * (4 - len(info))\n if self.valid:\n info += colored('OK'.center(8), 'green')\n else:\n info += colored('Not OK'.center(8), 'red')\n info += ' ' * (24 - len(info))\n info += '{file}'.format(file=self.file)\n\n return info\n\n def validate(self):\n \"\"\"\n Run the validator on this file.\n\n Set self.valid to True or False depending on the outcome.\n \"\"\"\n print(\"Validating {f}\".format(f=self.file))\n\n with open(self.file, 'r') as xmlfile:\n xml = xmlfile.read()\n # see if it is valid XML first\n try:\n etree.XML(xml)\n except etree.XMLSyntaxError:\n self.valid = False\n return\n # create an instance of the Validator\n xmlValidator = XmlValidator(open(specpath, 'rt').read())\n try:\n xmlValidator.validate(xml)\n self.valid = True\n except XmlValidationError as err:\n print(err)\n self.valid = False\n\n def __xml_preprocess(self, xml):\n \"\"\"\n Prepare the xml for processing.\n\n This is an internal method for the chapter class that tweaks the\n cnxmlplus before it is converted to one of the output formats e.g.\n image links are changed to point one folder up so that the output files\n in the build folder points to where the current images are located.\n\n This method is called from the convert method.\n\n input: cnxmlplus is an etree object of the cnxmlplus file\n output: etree object with pr\n\n \"\"\"\n processed_xml = xml\n\n return processed_xml\n\n def __copy_tex_images(self, build_folder, output_path):\n \"\"\"\n Copy tex images.\n\n Find all images referenced in the cnxmlplus document and copy them\n to their correct relative places in the build/tex folder.\n\n \"\"\"\n success = True\n with open(self.file) as f_in:\n xml = etree.XML(f_in.read())\n\n # if it is tex, we can copy the images referenced in the cnxmlplus\n # directly to the build/tex folder\n for image in xml.findall('.//image'):\n # find the src, it may be an attribute or a child element\n if 'src' in image.attrib:\n src = image.attrib['src'].strip()\n else:\n src = image.find('.//src').text.strip()\n\n # check for paths starting with /\n if 
src.startswith('/'):\n print(colored(\"ERROR! image paths may not start with /: \",\n \"red\") + src)\n success = False\n continue\n\n dest = os.path.join(build_folder, 'tex', src)\n if not os.path.exists(dest):\n try:\n mkdir_p(os.path.dirname(dest))\n except OSError:\n msg = colored(\"WARNING! {dest} is not allowed!\"\n .format(dest=dest),\n \"magenta\")\n success = False\n print(msg)\n success = copy_if_newer(src, dest)\n\n return success\n\n def __render_pstikz(self, output_path, parallel=True):\n \"\"\"\n Render the pstikz images.\n\n Use Bookbuilder/pstricks2png to render each pstricks and tikz\n image to png. Insert replace div.alternate tags with <img> tags\n Also, find pstricks and tikz in tex output and replace with\n includegraphics{}.\n \"\"\"\n rendered = imageutils.render_images(output_path, parallel=parallel)\n\n return rendered\n\n def __copy_html_images(self, build_folder, output_path):\n \"\"\"\n Copy html images.\n\n Find all images referenced in the converted html document and copy\n them to their correct relative places in the build/tex folder.\n \"\"\"\n success = True\n # copy images directly included in cnxmlplus to the output folder\n with open(output_path, 'r') as f_in:\n html = etree.HTML(f_in.read())\n\n for img in html.findall('.//img'):\n src = img.attrib['src']\n dest = os.path.join(os.path.dirname(output_path), src)\n if not os.path.exists(src) and (not os.path.exists(dest)):\n print_debug_msg(src + \" doesn't exist\")\n\n success = copy_if_newer(src, dest)\n\n return success\n\n def __tolatex(self):\n \"\"\"Convert this chapter to latex.\"\"\"\n print_debug_msg(\"Entered __tolatex {f}\".format(f=self.file))\n myprocess = subprocess.Popen([\"cnxmlplus2latex\", self.file],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n latex, err = myprocess.communicate()\n\n return latex\n\n def __tohtml(self):\n \"\"\"Convert this chapter to html.\"\"\"\n print_debug_msg(\"Entered __tohtml {f}\".format(f=self.file))\n# tohtmlpath = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n# 'tohtml.py')\n myprocess = subprocess.Popen([\"cnxmlplus2html\", self.file],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n html, err = myprocess.communicate()\n # html = htmlutils.add_mathjax(html)\n html = htmlutils.repair_equations(html)\n\n return html\n\n def __tohtml5(self):\n ''' Convert this chapter to latex\n '''\n print_debug_msg(\"Entered __tohtml5 {f}\".format(f=self.file))\n# tohtmlpath = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n# 'tohtml.py')\n myprocess = subprocess.Popen([\"cnxmlplus2html5\", self.file],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n html, err = myprocess.communicate()\n # html = htmlutils.add_mathjax(html)\n html = htmlutils.repair_equations(html)\n\n return html\n\n def __toxhtml(self):\n \"\"\"Convert this chapter to xhtml.\"\"\"\n xhtml = self.__tohtml()\n # Convert this html to xhtml\n xhtml = htmlutils.xhtml_cleanup(xhtml)\n\n return xhtml\n\n def __tomobile(self):\n \"\"\"Convert this chapter to mobile.\"\"\"\n html = self.__toxhtml()\n\n return html\n\n def convert(self, build_folder, output_format, parallel=True):\n \"\"\"Convert the chapter to the specified output format.\n\n Write to the build folder: {build_folder}/{output_format}/self.file.{format}\n e.g. 
build/tex/chapter1.cnxmlplus.tex as needed.\n\n output_format: one of 'tex', 'html'.\n \"\"\"\n conversion_functions = {'tex': self.__tolatex,\n 'html': self.__tohtml,\n 'xhtml': self.__toxhtml,\n 'mobile': self.__tomobile,\n 'html5': self.__tohtml5}\n\n for outformat in output_format:\n\n # convert this chapter to the specified format\n # call the converted method\n output_path = os.path.join(build_folder, outformat,\n self.file +\n '.{f}'.format(f=outformat))\n\n if outformat == 'mobile':\n output_path = output_path.replace(r'.mobile', '.html')\n if outformat == 'html5':\n output_path = output_path.replace(r'.html5', '.html')\n\n # only try this on valid cnxmlplus files\n if self.valid:\n # run the conversion only if the file has changed OR if it\n # doesn't exist (it may have been deleted manually)\n\n if any((self.has_changed,\n not os.path.exists(output_path),\n self.render_problems)):\n\n mkdir_p(os.path.dirname(output_path))\n print(\"Converting {ch} to {form}\".format(ch=self.file,\n form=outformat))\n converted = conversion_functions[outformat]()\n with open(output_path, 'w') as f_out:\n # This is a bit of a hack, not quite sure why I need\n # this\n if outformat == 'html' or outformat == 'html5':\n f_out.write(converted.encode('utf-8'))\n else:\n f_out.write(converted)\n\n # file has not changed AND the file exists\n elif (not self.has_changed) and (os.path.exists(output_path)):\n print(\"{f} {space} done {form}\"\n .format(f=self.file,\n space=' ' * (40 - len(self.file)),\n form=outformat))\n\n # copy the images to the build folder even if the file has not\n # changed and is still valid, the image may have been copied in\n # by the user\n if outformat == 'tex':\n copy_success = self.__copy_tex_images(build_folder,\n output_path)\n rendered = self.__render_pstikz(output_path,\n parallel=parallel)\n\n elif outformat == 'html':\n # copy images included to the output folder\n copy_success = self.__copy_html_images(build_folder,\n output_path)\n # read the output html, find all pstricks and tikz\n # code blocks and render them as pngs and include them\n # in <img> tags in the html\n rendered = self.__render_pstikz(output_path,\n parallel=parallel)\n\n elif outformat == 'xhtml':\n # copy images from html folder\n copy_success = self.__copy_html_images(build_folder,\n output_path)\n rendered = self.__render_pstikz(output_path,\n parallel=parallel)\n\n elif outformat == 'mobile':\n # copy images from html folder\n copy_success = self.__copy_html_images(build_folder,\n output_path)\n rendered = self.__render_pstikz(output_path,\n parallel=parallel)\n elif outformat == 'html5':\n # copy images from html folder\n copy_success = self.__copy_html_images(build_folder,\n output_path)\n rendered = self.__render_pstikz(output_path,\n parallel=parallel)\n if not (rendered and copy_success):\n self.render_problems = True\n else:\n self.render_problems = False\n\n def split_into_sections(self, formats=None):\n \"\"\"\n Split this chapter into seperate files, each containing a section.\n\n The first one contains the h1 element for the chapter too\n \"\"\"\n if formats is None:\n formats = ['html', 'xhtml', 'mobile', 'html5']\n\n for form in formats:\n if 'tex' in form:\n continue\n if form == 'xhtml':\n ext = '.xhtml'\n else:\n ext = '.html'\n\n chapterfilepath = os.path.join('build', form, self.file + ext)\n\n with open(chapterfilepath) as chapterfile:\n html = etree.HTML(chapterfile.read())\n # add unique IDs to all the section titles.\n html = add_unique_ids(html)\n # make a copy of the html, 
want to use as template.\n html_template = copy.deepcopy(html)\n for bodychild in html_template.find('.//body'):\n bodychild.getparent().remove(bodychild)\n\n if form != 'html5':\n # build up a list of the sections\n sections = []\n chapter = [c.getparent() for c in html.findall('.//div[@class=\"section\"]/h1')][0]\n\n thissection = []\n for child in chapter:\n if (child.tag != 'div'):\n thissection.append(child)\n else:\n if len(child) == 0:\n pass\n elif (child[0].tag == 'h2') or (child.attrib.get('class') == 'exercises'):\n thissection.append(child)\n sections.append(thissection)\n thissection = []\n else:\n thissection.append(child)\n else:\n # build up a list of the sections\n sections = []\n try:\n chapter = [c.getparent() for c in html.findall('.//section[@class=\"section\"]/h1')][0]\n except IndexError:\n continue\n\n thissection = []\n for child in chapter:\n if (child.tag != 'section'):\n thissection.append(child)\n else:\n if len(child) == 0:\n pass\n elif (child[0].tag == 'h2'):\n thissection.append(child)\n sections.append(thissection)\n thissection = []\n else:\n thissection.append(child)\n #sections.append(thissection)\n # write each section to a separate file\n for num, section in enumerate(sections):\n template = copy.deepcopy(html_template)\n body = template.find('.//body')\n for child in section:\n body.append(child)\n secfilename = self.file.replace('.cnxmlplus',\n '-{:02d}.cnxmlplus'.format(num))\n secfilepath = os.path.join('build', form, secfilename + ext)\n\n # add css to head\n css = '<link rel=\"stylesheet\" type=\"text/css\" href=\"css/stylesheet.css\"></link>'\n css = etree.fromstring(css)\n template.find('.//head').append(css)\n\n with open(secfilepath, 'w') as outfile:\n outfile.write(etree.tostring(template))\n\n # remove the original html\n os.remove(chapterfilepath)\n # create the ToC file.\n self.create_toc(os.path.dirname(chapterfilepath))\n\n def create_toc(self, path):\n \"\"\"\n Create the table of contents.\n\n Read all the html files in path and use div.section>h1 and\n div.section>h2 to make table of contents.\n \"\"\"\n # get all the (x)html files\n file_list = [f for f in os.listdir(path) if f.endswith('html')]\n file_list.sort()\n\n toc = []\n\n for htmlfile in file_list:\n with open(os.path.join(path, htmlfile)) as hf:\n html = etree.HTML(hf.read())\n for element in html.iter():\n if element.tag in ['h1', 'h2']:\n parent = element.getparent()\n if (parent.attrib.get('class') in ['section']) or (parent.tag == 'body'):\n\n # exercises are special\n #if parent.attrib.get('class') == 'exercises':\n ##ancestors = len([a for a in element.iterancestors() if a.tag == 'div']) + 1\n #element.text = \"Exercises\"\n #element.tag = 'h{}'.format(ancestors)\n\n toc.append((htmlfile, copy.deepcopy(element)))\n\n tocelements = [TocElement(t[0], t[1]) for t in toc]\n\n assert(len(toc) == len(set(toc)))\n\n tocbuilder = TOCBuilder()\n for tocelement in tocelements:\n tocbuilder.add_entry(tocelement)\n\n toccontent = \"\"\"\\\n <html>\n <head>\n <title>Table of contents\n \n \n \n {}\n \n\n \n\n \"\"\".format(etree.tostring(tocbuilder.as_etree_element(),\n pretty_print=True))\n\n # TODO, add ids to the section html pages.\n with open(os.path.join(path, 'tableofcontents.html'), 'w') as tocout:\n tocout.write(toccontent)\n\n def __str__(self):\n chapno = str(self.chapter_number).ljust(4)\n return \"{number} {title}\".format(number=chapno, 
title=self.title)\n","sub_path":"LibBookbuilder/chapter.py","file_name":"chapter.py","file_ext":"py","file_size_in_byte":22009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"437099530","text":"#!/usr/bin/env python\n\nimport sys\n\ndef main():\n filename = sys.argv[1]\n taskset_ids = sys.argv[2:]\n issued = set()\n finished = set()\n with open(filename) as f:\n for line in f:\n if taskset_ids:\n for taskset_id in taskset_ids:\n if taskset_id not in line:\n continue\n fields = line.split()\n if fields[3] == 'Issuing':\n i = 5\n if fields[4] == 'delayed':\n i += 1\n issued.add(fields[i])\n elif len(fields) > 8 and fields[8] == 'finished':\n finished.add(fields[4])\n print(len(issued))\n print(len(finished))\n print('\\n'.join(sorted(issued.symmetric_difference(finished))))\n\nif __name__ == '__main__':\n main()\n","sub_path":"l_tasks.py","file_name":"l_tasks.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"429306668","text":"\"\"\" Game to play 'Mobile Game'. This is the file you edit.\n\"\"\"\nfrom GUI import *\nfrom tree import *\n\nclass Balancer:\n \n def __init__(self):\n # put other instance variables here\n self.gui = GUI(\"Mobiles\", self)\n self.tree = Tree()\n\n def start(self):\n self.gui.run()\n \n def importMobile(self, fileName):\n \"\"\" Called by GUI when the 'Import' button is clicked.\n Open the file and fill the tree. \"\"\"\n try:\n # Primes the input file for reading.\n fileObj = open(fileName)\n except:\n print('Check your file name, input file does not exist in directory')\n print('Printing last inputted tree.')\n return\n # Copies data into variable\n fileData = fileObj.read()\n try:\n # Recursively creates the tree starting at the root.\n root = self.tree._createTree(fileData)\n except:\n print('Check your input file. There was a misformat.')\n return\n # Plants the tree.\n self.tree.insert(root)\n \n def getSize(self):\n \"\"\" Called by GUI to display the tree.\n Returns the size of the tree.\n Both bars and objects count in the size. \"\"\"\n return self.tree.getSize()\n\n def getInfo(self, index):\n \"\"\" Called by GUI to display the tree.\n Get the info for a particular tree node.\n For these mobiles, the nodes in the tree are indexed as seen here:\n #1\n ---------------\n | |\n #2 #5\n ------- ----------\n | | | |\n #3 #4 #6 #7\n Hint: the node index is related to one of the tree walks. Which one?\n Hint: the size of each node's subtree may be helpful to know.\n \n Returns [\"Bar\", length, number] (for unbalanced bars)\n Returns [\"Bal\", leftLength, rightLength, number] ( for balanced bars)\n Returns [\"Obj\", weight] (for objects) \"\"\"\n return self.tree.search(index)\n \n def balance(self): # O(n^3 + 4n) ~= O(n^3)\n \"\"\" Called by GUI when button clicked.\n Balance the tree.\n Then print your output to output.txt. \"\"\"\n # Calls recursive balance method.\n self.tree.balance() # O(n^3)\n \n # Creates preorder traversal of tree and saves it to output file.\n self.tree.preorder() # O(n)\n \n # Creates postorder traversal of tree and saves it to output file.\n self.tree.postorder() # O(n)\n \n # Creates inorder traversal of tree and saves it to output file.\n self.tree.inorder() # O(n)\n \n # Creates a post balance result in the output file.\n self.tree.result() # O(n)\n\n# Put other classes here or in other files as needed. \n\n\"\"\" Launch the game. 
\"\"\"\nb = Balancer()\nb.start() # This does not return until the gui quits\n","sub_path":"csci204/Project Files/Project 3/Work Files/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"598164796","text":"\"\"\"\nImplement simple training\n\"\"\"\n\n#* Import Libraries *#\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom matplotlib import pyplot as plt\nfrom sklearn.utils import shuffle\nfrom sklearn.metrics import f1_score\nfrom sklearn.model_selection import train_test_split\n\n#* Register src directory path to PYTHONPATH *#\nimport sys\nfrom os import path, pardir\ncurrent_dir = path.abspath(path.dirname(__file__))\nparent_dir = path.abspath(path.join(current_dir, pardir))\nparent_parent_dir = path.abspath(path.join(parent_dir, pardir))\nsys.path.append(parent_dir)\n\n#* Import my Libraries *#\nfrom mylib import utils\nfrom mylib import preproc_utils\nfrom modeldev import d_layer\nfrom modeldev import d_model\nfrom modeldev import d_loss_func\nfrom modeldev import d_calc_loss_func\n\n#* Set params *#\n# general params\nrandom_state = 50\ntest_size_ratio = 0.1\nvalid_size_ratio = 0.1\n\n# training params\nvae_layer_dim_list = [13, 7, 5]\nvae_layer_type_list = [d_layer.Dense, d_layer.Drop]\nvae_act_func_list = [tf.nn.relu, tf.nn.relu6]\nvae_loss_func = tf.losses.mean_squared_error\nvae_optimizer = tf.train.AdamOptimizer\nvae_epochs = 10\nvae_kld_coef = 1.0\nmlp_layer_dim_list = [3, 1]\nmlp_layer_type_list = [d_layer.Dense, d_layer.Dense]\nmlp_act_func_list = [tf.nn.relu, tf.nn.sigmoid]\nmlp_loss_func = tf.losses.mean_squared_error\nmlp_optimizer = tf.train.AdamOptimizer\nmlp_epochs = 10\n\n\ndef simple_train_main():\n\n # load data\n dirname = parent_parent_dir + '/data/dataset'\n df_X, df_y = utils.load_dataset_X_y(dirname, 'pd')\n\n # ======================================================================= #\n # Data Preprocesing\n # ======================================================================= #\n\n # data preprocessing for all data\n # NOTE: do nothing curretly\n df_X = preproc_utils.data_preproc_base(df_X)\n\n # split into train, test and valid data\n split_data = preproc_utils.train_test_valid_split(df_X,\n df_y,\n test_size=test_size_ratio,\n valid_size=valid_size_ratio,\n random_state=random_state)\n df_train_X, df_test_X, df_valid_X, df_train_y, df_test_y, df_valid_y = split_data\n\n # data preprocessing for split data\n split_data = preproc_utils.data_preproc_split(df_train_X, \n df_valid_X,\n df_test_X)\n df_train_X, df_valid_X, df_test_X = split_data\n\n # data preprocessing for train data\n df_train_X, df_train_y = preproc_utils.data_preproc_train(df_train_X,\n df_train_y)\n\n\n # ======================================================================= #\n # Training\n # ======================================================================= #\n\n # define model\n model = d_model.Model(vae_layer_dim_list,\n vae_layer_type_list,\n vae_act_func_list,\n vae_loss_func,\n vae_optimizer,\n vae_epochs,\n vae_kld_coef,\n mlp_layer_dim_list,\n mlp_layer_type_list,\n mlp_act_func_list,\n mlp_loss_func,\n mlp_optimizer,\n mlp_epochs)\n\n sess = tf.Session()\n init = tf.global_variables_initializer()\n sess.run(init)\n\n # train\n sess = model.fit(sess, df_train_X.values, df_train_y.values,\n df_valid_X.values, df_valid_y.values)\n pred = model.predict(sess, df_test_X.values)\n\n error = np.mean((df_test_y.values - pred)**2)\n 
print('ERROR:{}'.format(error))\n\n    print('END')\n\n\nif __name__ == '__main__':\n    simple_train_main()","sub_path":"neuralnet/fine-tuning_transfer-learning/src/modeldev/simple_train.py","file_name":"simple_train.py","file_ext":"py","file_size_in_byte":3986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"169184990","text":"# In the range of natural numbers from 2 to 99, determine\n# how many of them are multiples of any of the numbers in the range from 2 to 9.\n\nresult = {}\nfor first_range_num in range(2, 10):\n    result[first_range_num] = 0\n    for second_range_num in range(2, 100):\n        if second_range_num % first_range_num == 0:\n            result[first_range_num] += 1\n\nfor key, value in result.items():\n    print(f'{value} numbers in the range 2-99 are multiples of {key}')\n","sub_path":"Lesson3/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"152638535","text":"from PIL import Image\nim=Image.open(\"lenna.png\")\n\nprint(im.format,im.size,im.mode)\n\nfrom PIL import ImageFilter\n\noutF = im.filter(ImageFilter.DETAIL)\nconF = im.filter(ImageFilter.CONTOUR)\nedgeF = im.filter(ImageFilter.FIND_EDGES)\nim.show()\noutF.show()\nconF.show()\nedgeF.show()\n\nfrom PIL import ImageEnhance\n\nimgE = Image.open(\"lenna.png\")\nimgEH = ImageEnhance.Contrast(imgE)\nimgE.show()\nimgEH.enhance(1.3).show(\"30% more contrast\")\nimgEH.enhance(1.8).show(\"80% more contrast\")\n","sub_path":"Pillow_Demo1.py","file_name":"Pillow_Demo1.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"395580193","text":"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom unittest import mock\n\nfrom google.cloud import _http\nfrom google.cloud.ndb import _datastore_api as _api\nfrom google.cloud.ndb import _runstate\nfrom google.cloud.ndb import tasklets\n\n\nclass TestStub:\n    @staticmethod\n    @mock.patch(\"google.cloud.ndb._datastore_api._helpers\")\n    @mock.patch(\"google.cloud.ndb._datastore_api.datastore_pb2_grpc\")\n    def test_secure_channel(datastore_pb2_grpc, _helpers):\n        channel = _helpers.make_secure_channel.return_value\n        client = mock.Mock(\n            _credentials=\"creds\",\n            secure=True,\n            host=\"thehost\",\n            spec=(\"_credentials\", \"secure\", \"host\"),\n        )\n        with _runstate.state_context(client):\n            stub = _api.stub()\n            assert _api.stub() is stub  # one stub per context\n        assert stub is datastore_pb2_grpc.DatastoreStub.return_value\n        datastore_pb2_grpc.DatastoreStub.assert_called_once_with(channel)\n        _helpers.make_secure_channel.assert_called_once_with(\n            \"creds\", _http.DEFAULT_USER_AGENT, \"thehost\"\n        )\n\n    @staticmethod\n    @mock.patch(\"google.cloud.ndb._datastore_api.grpc\")\n    @mock.patch(\"google.cloud.ndb._datastore_api.datastore_pb2_grpc\")\n    def test_insecure_channel(datastore_pb2_grpc, grpc):\n        channel = 
grpc.insecure_channel.return_value\n client = mock.Mock(\n secure=False, host=\"thehost\", spec=(\"secure\", \"host\")\n )\n with _runstate.state_context(client):\n stub = _api.stub()\n assert stub is datastore_pb2_grpc.DatastoreStub.return_value\n datastore_pb2_grpc.DatastoreStub.assert_called_once_with(channel)\n grpc.insecure_channel.assert_called_once_with(\"thehost\")\n\n\ndef _mock_key(protobuf):\n key = mock.Mock(spec=(\"to_protobuf\",))\n key.to_protobuf.return_value = protobuf\n return key\n\n\ndef test_lookup(runstate):\n runstate.eventloop = mock.Mock(spec=(\"add_idle\", \"run\"))\n future1 = _api.lookup(_mock_key(\"foo\"))\n future2 = _api.lookup(_mock_key(\"foo\"))\n future3 = _api.lookup(_mock_key(\"bar\"))\n\n batch = runstate.batches[_api._BATCH_LOOKUP]\n assert batch[\"foo\"] == [future1, future2]\n assert batch[\"bar\"] == [future3]\n runstate.eventloop.add_idle.assert_called_once_with(\n _api._perform_batch_lookup\n )\n\n\nclass Test_perform_batch_lookup:\n @staticmethod\n @mock.patch(\"google.cloud.ndb._datastore_api._datastore_lookup\")\n def test_it(_datastore_lookup, runstate):\n runstate.eventloop = mock.Mock(spec=(\"queue_rpc\", \"run\"))\n runstate.batches[_api._BATCH_LOOKUP] = batch = {\n \"foo\": [\"one\", \"two\"],\n \"bar\": [\"three\"],\n }\n _api._perform_batch_lookup()\n _datastore_lookup.assert_called_once_with(batch.keys())\n rpc = _datastore_lookup.return_value\n call_args = runstate.eventloop.queue_rpc.call_args[0]\n assert call_args[0] == rpc\n assert call_args[1].batch is batch\n\n @staticmethod\n @mock.patch(\"google.cloud.ndb._datastore_api._datastore_lookup\")\n def test_it_no_batch(_datastore_lookup, runstate):\n runstate.eventloop = mock.Mock(spec=(\"queue_rpc\", \"run\"))\n _api._perform_batch_lookup()\n _datastore_lookup.assert_not_called()\n runstate.eventloop.queue_rpc.assert_not_called()\n\n\nclass TestBatchLookupCallback:\n @staticmethod\n def test_exception():\n future1, future2, future3 = (tasklets.Future() for _ in range(3))\n batch = {\"foo\": [future1, future2], \"bar\": [future3]}\n error = Exception(\"Spurious error.\")\n rpc = tasklets.Future()\n rpc.set_exception(error)\n callback = _api.BatchLookupCallback(batch)\n callback(rpc)\n\n assert future1.exception() is error\n assert future2.exception() is error\n\n @staticmethod\n def test_found():\n future1, future2, future3 = (tasklets.Future() for _ in range(3))\n batch = {\"foo\": [future1, future2], \"bar\": [future3]}\n entity1 = mock.Mock(key=\"foo\", spec=(\"key\",))\n entity2 = mock.Mock(key=\"bar\", spec=(\"key\",))\n response = mock.Mock(\n found=[\n mock.Mock(entity=entity1, spec=(\"entity\",)),\n mock.Mock(entity=entity2, spec=(\"entity\",)),\n ],\n missing=[],\n deferred=[],\n spec=(\"found\", \"missing\", \"deferred\"),\n )\n rpc = tasklets.Future()\n rpc.set_result(response)\n callback = _api.BatchLookupCallback(batch)\n callback(rpc)\n\n assert future1.result() is entity1\n assert future2.result() is entity1\n assert future3.result() is entity2\n\n @staticmethod\n def test_missing():\n future1, future2, future3 = (tasklets.Future() for _ in range(3))\n batch = {\"foo\": [future1, future2], \"bar\": [future3]}\n entity1 = mock.Mock(key=\"foo\", spec=(\"key\",))\n entity2 = mock.Mock(key=\"bar\", spec=(\"key\",))\n response = mock.Mock(\n missing=[\n mock.Mock(entity=entity1, spec=(\"entity\",)),\n mock.Mock(entity=entity2, spec=(\"entity\",)),\n ],\n found=[],\n deferred=[],\n spec=(\"found\", \"missing\", \"deferred\"),\n )\n rpc = tasklets.Future()\n 
rpc.set_result(response)\n callback = _api.BatchLookupCallback(batch)\n callback(rpc)\n\n assert future1.result() is _api._NOT_FOUND\n assert future2.result() is _api._NOT_FOUND\n assert future3.result() is _api._NOT_FOUND\n\n @staticmethod\n def test_deferred(runstate):\n runstate.eventloop = mock.Mock(spec=(\"add_idle\", \"run\"))\n future1, future2, future3 = (tasklets.Future() for _ in range(3))\n batch = {\"foo\": [future1, future2], \"bar\": [future3]}\n response = mock.Mock(\n missing=[],\n found=[],\n deferred=[\"foo\", \"bar\"],\n spec=(\"found\", \"missing\", \"deferred\"),\n )\n rpc = tasklets.Future()\n rpc.set_result(response)\n callback = _api.BatchLookupCallback(batch)\n callback(rpc)\n\n assert future1.running()\n assert future2.running()\n assert future3.running()\n\n assert runstate.batches[_api._BATCH_LOOKUP] == batch\n runstate.eventloop.add_idle.assert_called_once_with(\n _api._perform_batch_lookup\n )\n\n @staticmethod\n def test_found_missing_deferred(runstate):\n runstate.eventloop = mock.Mock(spec=(\"add_idle\", \"run\"))\n future1, future2, future3 = (tasklets.Future() for _ in range(3))\n batch = {\"foo\": [future1], \"bar\": [future2], \"baz\": [future3]}\n entity1 = mock.Mock(key=\"foo\", spec=(\"key\",))\n entity2 = mock.Mock(key=\"bar\", spec=(\"key\",))\n response = mock.Mock(\n found=[mock.Mock(entity=entity1, spec=(\"entity\",))],\n missing=[mock.Mock(entity=entity2, spec=(\"entity\",))],\n deferred=[\"baz\"],\n spec=(\"found\", \"missing\", \"deferred\"),\n )\n rpc = tasklets.Future()\n rpc.set_result(response)\n callback = _api.BatchLookupCallback(batch)\n callback(rpc)\n\n assert future1.result() is entity1\n assert future2.result() is _api._NOT_FOUND\n assert future3.running()\n\n assert runstate.batches[_api._BATCH_LOOKUP] == {\"baz\": [future3]}\n runstate.eventloop.add_idle.assert_called_once_with(\n _api._perform_batch_lookup\n )\n\n\n@mock.patch(\"google.cloud.ndb._datastore_api.datastore_pb2\")\ndef test__datastore_lookup(datastore_pb2, runstate):\n runstate.client = mock.Mock(project=\"theproject\", spec=(\"project\",))\n runstate.stub = mock.Mock(spec=(\"Lookup\",))\n runstate.stub.return_value = mock.Mock(spec=(\"future\",))\n _api._datastore_lookup([\"foo\", \"bar\"]) is runstate.stub.return_value\n\n datastore_pb2.LookupRequest.assert_called_once_with(\n project_id=\"theproject\", keys=[\"foo\", \"bar\"]\n )\n runstate.stub.Lookup.future.assert_called_once_with(\n datastore_pb2.LookupRequest.return_value\n )\n","sub_path":"ndb/tests/unit/test__datastore_api.py","file_name":"test__datastore_api.py","file_ext":"py","file_size_in_byte":8601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"285368823","text":"import json\nimport csv\nimport re\n\nfilepath='1022-1027.jsonl'\nwith open(filepath,'r',encoding='utf-8') as f:\n txt=f.read()\n\ncsv_file=csv.writer(open(filepath+'.csv','w',encoding='utf-8-sig',newline=''))\n\nitems=re.findall('(\\{\"_account\":.*?\"_id\":\".*?\"\\})',txt)\n\nfor item in items:\n data=json.loads(item)\n\n csv_file.writerow([data['_line']])\n csv_file.writerow([data['message']])\n\n\n\n","sub_path":"job-json,xml,csv/Tk-提取json数据/提取数据.py","file_name":"提取数据.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"320974107","text":"def solution(array, commands):\n answer = []\n answer2 = []\n for i in range(len(commands)):\n j = commands[i][0]-1\n k = commands[i][1]\n l = 
commands[i][2]-1\n        answer = array[j:k]\n        answer.sort()\n        answer2.append(answer[l])\n    return answer2\n\n\nif __name__ == '__main__':\n    print(solution([1, 5, 2, 6, 3, 7, 4], [[2, 5, 3], [4, 4, 1], [1, 7, 3]]))\n","sub_path":"programmers/sort_1.py","file_name":"sort_1.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"161643395","text":"from flask import Flask, render_template, request, url_for, redirect, Markup, jsonify, make_response, \\\n    send_from_directory, session\nimport Searching, Parse\nimport datetime\nimport time\n\napp = Flask(__name__, static_url_path='/static')\n\n\n@app.route('/', defaults={'path': ''})\n@app.route('/<path:path>')\ndef catch_complete(path):\n    typeVal = request.args.get('type', None)\n    if typeVal != None:\n        if typeVal.lower() == 'all':\n            typeVal = None\n    if len(path) < 5:\n        return path + \" is an invalid school name\"\n    thread = \"https://talk.collegeconfidential.com/\" + path\n    database, countVal = Searching.search_all(thread, typeVal)\n    for keyName in database.keys():\n        for i, val in enumerate(database[keyName]):\n            database[keyName][i] = Parse.parse_html(val)\n    database2 = []\n    order = [\"accepted\", \"rejected\", \"unknown\"]\n    totalCount = 0\n    for k in order:\n        info = {}\n        info[\"decision\"] = k\n        info[\"results\"] = sorted(database[k],\n                                 key=lambda e: datetime.datetime(*time.strptime(e['dtString'], \"%Y-%m-%d\")[:6]),\n                                 reverse=True)\n        totalCount += len(info['results'])\n        database2.append(info)\n    if typeVal == None:\n        typeVal = \"\"\n    else:\n        typeVal += \" \"\n    schoolName = path.split(\"?\")[0].replace(\"-\", \" \").title()\n    return render_template('results.html', schoolName=schoolName, typeVal=typeVal.title(), database=database2,\n                           choices=[database.keys()], resultCount=totalCount)\n\n@app.route('/handle_data', methods=['POST'])\ndef handle_data():\n    thread = request.form['projectFilepath']\n    database, countVal = Searching.search_all(thread)\n\n    for keyName in database.keys():\n        for i, val in enumerate(database[keyName]):\n            database[keyName][i] = Parse.parse_html(val)\n    database2 = []\n    order = [\"accepted\", \"rejected\", \"unknown\"]\n    for k in order:\n        info = {}\n        info[\"decision\"] = k\n        info[\"results\"] = sorted(database[k],\n                                 key=lambda e: datetime.datetime(*time.strptime(e['dtString'], \"%Y-%m-%d\")[:6]),\n                                 reverse=True)\n        database2.append(info)\n    return render_template('results.html', database=database2, choices=[database.keys()])\n\n\nif __name__ == '__main__':\n    app.run(host=\"0.0.0.0\", port=8080)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"540077881","text":"\ndef estritamente_crescente(lista):\n    if lista == []:\n        return lista\n    else:\n        inicio = lista[0]\n        cresce=[inicio] \n        for i in range(len(lista)-1):\n            if lista[i+1]>inicio:\n                inicio = lista[i+1]\n                cresce.append(lista[i+1])\n        return cresce\n    \ndef estritamente_decrescente(lista):\n    if lista == []:\n        return lista\n    else:\n        inicio = lista[0]\n        cresce=[inicio] \n        for i in range(len(lista)-1):\n            if lista[i+1]= 0:\n            # offset is negative X\n            return (current_offset + offset, dirs[current_offset+offset][0])\n        else:\n            return (None, None)\n    else:\n        if current_offset + offset > len(dirs)-1:\n            # len is 1 based, current_offset is 0 based.\n            return (None, None)\n        else:\n            return (current_offset + offset, 
dirs[current_offset+offset][0])\n\n\n#####################################################\n def directory_changed(self, scan_directory):\n \"\"\"\n Args:\n scan_directory (str): The fully qualified pathname to examine\n\n Returns:\n Boolean\n\n Return Values\n\n * True - The directory has changed since being added to the cache\n * False - The directory has **not** changed since being added to the cache\n\n Pass the target directory as scan_directory.\n\n There is two checks being made to decide on the validity of the cache.\n\n 1. Check the last modified time on the directory vs the\n **last_scanned_time** in the cached data.\n 2. Check the number of files and directories in the cached copy for any\n differences.\n\n \"\"\"\n scan_directory = os.path.realpath(scan_directory).strip()\n if self.directory_in_cache(scan_directory):\n # Is in cache\n st = os.stat(scan_directory)\n # Return true if modified time on directory is newer Cached Time.\n if \"last_scanned_time\" in self.d_cache[scan_directory]:\n if st[stat.ST_MTIME] > self.d_cache[scan_directory]\\\n [\"last_scanned_time\"]:\n return True\n\n #path, raw_dirc, raw_filec = scandir.walk(scan_directory).next()\n raw_filec, raw_dirc = self._return_total_fd_count(scan_directory)\n try:\n if self.d_cache[scan_directory][\"raw_filec\"] != raw_filec \\\n or self.d_cache[scan_directory][\"raw_dirc\"] != raw_dirc:\n return True\n# if self.d_cache[scan_directory][\"raw_filec\"] != len(raw_filec)\\\n# or self.d_cache[scan_directory][\"raw_dirc\"] != len(raw_dirc):\n# return True\n except exceptions.KeyError:\n pass\n return False\n else:\n # Does not exist in Cache, so force a load.\n return True\n\n#####################################################\n def smart_read(self, scan_directory):\n \"\"\"\n Args:\n scan_directory (str): The fully qualified pathname to examine\n\n Returns:\n True - Path Exists and/or Read was successful\n False - Path does *NOT* exist\n\n This is a wrapper around the Read and changed functions.\n\n The scan_directory is passed in, converted to a normalized form,\n and then checked to see if it exists in the cache.\n\n If it doesn't exist (or is expired), then it is read.\n\n If it already exists *AND* has not expired, it is not\n updated.\n\n **Net affect, this will ensure the directory is in cache, and\n update to date.**\n\n In addition, the clean_filename function has been merged into\n _scan_directory. It will check to see if any filenames in the\n ``scan_directory`` location need to be scrubbed / cleaned.\n\n This function uses the ``filter_filenames`` variable/pointer to\n check and scrub the filenames.\n\n If self.filter_filenames is set, this function will call\n self.filter_filenames to test against the file and directory names.\n\n This feature was added for the gallery, to automate the renaming\n of the directories and files, to ensure that the files and directory\n names are acceptable to the web server and web browser.\n\n By setting a ``filter_filenames`` function, you can use this as you\n choose.\n\n By default, this is not turned on. 
This is an opt-in feature.\n\n code::\n\n import common\n import file_types\n self.cdl = directory_caching.Cache()\n self.cdl.files_to_ignore = file_types.files_to_ignore\n self.cdl.acceptable_extensions = file_types.image_safe_files\n self.cdl.filter_filenames = common.clean_filename2\n print \"Priming the cache for %s, please wait\" %\\\n file_types.locations[\"albums_root\"].lower().strip()\n self.cdl.smart_read(\n file_types.locations[\"albums_root\"].lower().strip())\n print \"Pump primed.\"\n\n After assigning self.cdl.filter_filenames, every time a directory is\n examined by the caching engine, it will rename the files and directories\n if an invalid filename or directory name is found.\n\n This check is simply a comparison, the filename is passed to the cleaning\n function, and if the returned filename is different, the file is renamed\n to the new name.\n\n code::\n\n if orig_name[1].fq_filename != new_name:\n os.rename(orig_name[1].fq_filename, new_name)\n\n An example cleaning function, from the Gallery application.\n\n code::\n\n def clean_filename2(filename):\n replacements = {'\"':\"`\", \"'\":\"`\",\n \",\":\"\", \"#\":\"\",\n \"*\":\"\", \"@\":\"\",\n \":\":\"-\", \"|\":\"\"}\n filename = replace_all(urllib2.unquote(filename), replacements)\n # Un\"quotify\" the URL / Filename\n filename = unidecode.unidecode(filename)\n # de-unicode the filename / url\n filename, fileext = os.path.splitext(filename)\n filename = filename.strip() + fileext.strip()\n # remove extra spaces from filename and file extension.\n # e.g. \"this is the filename .txt\" -> \"this is the filename.txt\"\n return filename\n\n \"\"\"\n scan_directory = os.path.realpath(scan_directory).strip()\n if os.path.exists(scan_directory) != False:\n if self.directory_changed(scan_directory):\n self._scan_directory_list(scan_directory)\n return True\n else:\n return False\n\n#####################################################\n def return_sorted(self, scan_directory, sort_by=0, reverse=False):\n \"\"\"\n Args:\n scan_directory (str): The fully qualified pathname to examine\n sort_by (integer / constant):\n\n SORT_BY_NAME = 0\n SORT_BY_MODIFIED = 1\n SORT_BY_CREATION = 2\n\n reverse (bool): Is this an ascending or descending (**reverse**) sort\n\n Returns:\n Tupple: List of sorted Cache entries (text, scandir DirEntry)\n\n\n Return sorted list(s) from the Directory Cache for the\n Scanned directory, sorted by name.\n\n Returns 2 tuples of date, T[0] - Files, and T[1] - Directories\n which contain the data from the cached directory.\n\n \"\"\"\n self.sanity_check(scan_directory)\n scan_directory = os.path.realpath(scan_directory).strip()\n if self.smart_read(scan_directory) is False:\n return ([], [])\n if self.d_cache[scan_directory][\"last_sort\"] != sort_by:\n self.d_cache[scan_directory][\"last_sort\"] = sort_by\n files = self.d_cache[scan_directory][\"files\"]\n dirs = self.d_cache[scan_directory][\"dirs\"]\n if sort_by == SORT_BY_NAME:\n files = natsort.natsort(files.items(),\n key=lambda t: t[1].filename.lower(),\n reverse=reverse)\n dirs = natsort.natsort(dirs.items(),\n key=lambda t:\\\n t[1].directoryname.lower(),\n reverse=reverse)\n elif sort_by == SORT_BY_MODIFIED:\n files = sorted(files.items(),\\\n key=lambda t: t[1].st.st_mtime, reverse=reverse)\n dirs = sorted(dirs.items(),\\\n key=lambda t: t[1].st.st_mtime, reverse=reverse)\n elif sort_by == SORT_BY_CREATION:\n files = sorted(files.items(),\\\n key=lambda t: t[1].st.st_ctime, reverse=reverse)\n dirs = sorted(dirs.items(),\\\n key=lambda t: 
t[1].st.st_ctime, reverse=reverse)\n\n self.d_cache[scan_directory][\"sort_index\"] = files, dirs\n\n return self.d_cache[scan_directory][\"sort_index\"]\n\n\n#####################################################\n def sanity_check(self, scan_directory):\n scan_directory = os.path.realpath(scan_directory).strip()\n if not 'files' in self.d_cache[scan_directory]:\n self.d_cache[scan_directory] = {}\n self.d_cache[scan_directory][\"last_sort\"] = None\n elif not 'dirs' in self.d_cache[scan_directory]:\n self.d_cache[scan_directory] = {}\n self.d_cache[scan_directory][\"last_sort\"] = None\n\n\n def return_sort_name(self, scan_directory, reverse=False):\n \"\"\"\n Here for backward compatibility versus earlier versions of the library.\n This will eventually be removed.\n\n Args:\n scan_directory (str): The fully qualified pathname to examine\n reverse (bool): Is this an ascending or descending (**reverse**) sort\n\n Returns:\n Same as return_sorted.\n \"\"\"\n scan_directory = os.path.realpath(scan_directory).strip()\n self.sanity_check(scan_directory)\n return self.return_sorted(scan_directory,\n sort_by=SORT_BY_NAME,\n reverse=reverse)\n\n#####################################################\n def return_sort_lmod(self, scan_directory, reverse=False):\n \"\"\"\n Here for backward compatibility versus earlier versions of the library.\n This will eventually be removed.\n\n Args:\n scan_directory (str): The fully qualified pathname to examine\n reverse (bool): Is this an ascending or descending (**reverse**) sort\n\n Returns:\n Same as return_sorted.\n \"\"\"\n return self.return_sorted(scan_directory,\n sort_by=SORT_BY_MODIFIED,\n reverse=reverse)\n\n#####################################################\n def return_sort_ctime(self, scan_directory, reverse=False):\n \"\"\"\n Here for backward compatibility versus earlier versions of the library.\n This will eventually be removed.\n\n Args:\n scan_directory (str): The fully qualified pathname to examine\n reverse (bool): Is this an ascending or descending (**reverse**) sort\n\n Returns:\n Same as return_sorted.\n \"\"\"\n return self.return_sorted(scan_directory,\n sort_by=SORT_BY_CREATION,\n reverse=reverse)\n","sub_path":"directory_caching2/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":30336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"499721737","text":"def insertion_sort(arr):\n n = len(arr)\n for i in range(1,n):\n temp = arr[i]\n j = i-1\n\n while (j>=0 and arr[j]>temp):\n arr[j+1] = arr[j]\n j = j-1\n arr[j+1] = temp\n\n return arr\n\nres = insertion_sort([67,45,34,12,9])\nprint(res)","sub_path":"insertion_sort.py","file_name":"insertion_sort.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"434053163","text":"import sys\nimport time\n\nfrom Buzhihuo.single.py.Loop import loop\nfrom util.HwndList import get\n\n\ndef main():\n print(\"欢迎来到阴阳师联盟!\")\n arg = 0\n tt = 0.00\n #获取所有阴阳师句柄\n list = get()\n while True:\n # time.sleep(tt) # 设置隔2秒运行一次\n\n #循环所有句柄\n for hd in list:\n loop(hd)\n\nif __name__==\"__main__\":\n main()\n\n\n\n\n\n\n","sub_path":"v2.0/Buzhihuo/single/py/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"167833203","text":"\"\"\"In this script, some functions have been combined and adapted from:\n\n - 
https://github.com/udacity/ud330/blob/master/Lesson4/step2/project.py\n - https://github.com/lobrown/Full-Stack-Foundations\n\"\"\"\n\n\nfrom flask import (Flask, render_template, request, redirect,\n jsonify, url_for, flash)\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy import desc, and_\nfrom database_setup import Base, Category, CatalogItem, User\n\nfrom flask import session as login_session\nimport random\nimport string\nfrom oauth2client.client import flow_from_clientsecrets\nfrom oauth2client.client import FlowExchangeError\nimport httplib2\nimport json\nfrom flask import make_response\nimport requests\n\nfrom forms import FormItem, FormCategory\n\n\napp = Flask(__name__)\n\nCLIENT_ID = json.loads(\n open('client_secrets.json', 'r').read())['web']['client_id']\nAPPLICATION_NAME = \"Catalog application\"\n\nengine = create_engine('sqlite:///categorycatalog.db')\nBase.metadata.bind = engine\n\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\n\n\n# Create anti-forgery state token\n@app.route('/login')\ndef showLogin():\n \"\"\"Render the login page\"\"\"\n state = ''.join(random.choice(string.ascii_uppercase + string.digits)\n for x in range(32))\n login_session['state'] = state\n # return \"The current session state is %s\" % login_session['state']\n return render_template('login.html', STATE=state)\n\n\n@app.route('/fbconnect', methods=['POST'])\ndef fbconnect():\n \"\"\"Connection via facebook\n Function retrieved from\n https://github.com/udacity/ud330/blob/master/Lesson4/step2/project.py\"\"\"\n if request.args.get('state') != login_session['state']:\n response = make_response(json.dumps('Invalid state parameter.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n access_token = request.data\n\n app_id = json.loads(open('fb_client_secrets.json', 'r').read())[\n 'web']['app_id']\n app_secret = json.loads(\n open('fb_client_secrets.json', 'r').read())['web']['app_secret']\n url = 'https://graph.facebook.com/oauth/access_token'\n url += '?grant_type=fb_exchange_token&client_id='\n url += '%s&client_secret=%s&fb_exchange_token=%s' % (\n app_id, app_secret, access_token)\n h = httplib2.Http()\n result = h.request(url, 'GET')[1]\n\n # Use token to get user info from API\n # userinfo_url = \"https://graph.facebook.com/v2.4/me\"\n # strip expire tag from access token\n token = result.split(\"&\")[0]\n\n url = 'https://graph.facebook.com/v2.4/me?%s&fields=name,id,email' % token\n h = httplib2.Http()\n result = h.request(url, 'GET')[1]\n\n data = json.loads(result)\n login_session['provider'] = 'facebook'\n login_session['username'] = data[\"name\"]\n login_session['email'] = data[\"email\"]\n login_session['facebook_id'] = data[\"id\"]\n\n # The token must be stored in the login_session in order to properly\n # logout, let's strip out the information before the equals\n # sign in our token\n stored_token = token.split(\"=\")[1]\n login_session['access_token'] = stored_token\n\n # Get user picture\n url = 'https://graph.facebook.com/v2.4/me/picture'\n url += '?%s&redirect=0&height=200&width=200' % token\n h = httplib2.Http()\n result = h.request(url, 'GET')[1]\n data = json.loads(result)\n\n login_session['picture'] = data[\"data\"][\"url\"]\n\n # see if user exists\n user_id = getUserID(login_session['email'])\n if not user_id:\n user_id = createUser(login_session)\n login_session['user_id'] = user_id\n\n output = ''\n output += '
<h1>Welcome, '\n    output += login_session['username']\n    output += '!</h1>'\n    output += '<img src=\"'\n    output += login_session['picture']\n    output += ' \" style = \"width: 300px; height: 300px;border-radius: 150px;'\n    output += '-webkit-border-radius: 150px;-moz-border-radius: 150px;\"> '\n    flash(\"Now logged in as %s\" % login_session['username'])\n    return output\n\n\n# User helper functions, restored from the udacity project cited in the\n# module docstring; fbconnect above depends on them.\ndef createUser(login_session):\n    \"\"\"Store a new user from the login session and return its id.\"\"\"\n    newUser = User(name=login_session['username'],\n                   email=login_session['email'],\n                   picture=login_session['picture'])\n    session.add(newUser)\n    session.commit()\n    user = session.query(User).filter_by(email=login_session['email']).one()\n    return user.id\n\n\ndef getUserID(email):\n    \"\"\"Return the user id for an email address, or None if unknown.\"\"\"\n    try:\n        user = session.query(User).filter_by(email=email).one()\n        return user.id\n    except Exception:\n        return None\n\n\n@app.route('/category/<category_name>/JSON')\ndef categoryCatalogJSON(category_name):\n    \"\"\"Return a json with all items of the given category\n\n    Returns:\n        A json of the category's items\"\"\"\n    items = session.query(\n        CatalogItem, Category).filter(and_(\n            Category.name == category_name,\n            CatalogItem.category_id == Category.id)).order_by(\n            desc(CatalogItem.date_added)).all()\n    return jsonify(CatalogItems=[i.serialize for i, _ in items])\n\n\n@app.route('/category/all/JSON')\ndef categoryCatalogJSONall():\n    \"\"\"Return a json with all catalog items and their attributes\n\n    Returns:\n        A json of all catalog items\"\"\"\n    items = session.query(CatalogItem).all()\n    return jsonify(CatalogItems=[i.serialize for i in items])\n\n\n@app.route('/category/<category_name>/<item_name>/JSON')\ndef catalogItemJSON(category_name, item_name):\n    \"\"\"Return a json with a single catalog item and its attributes\n\n    Returns:\n        A json of the requested item\"\"\"\n    # we filter by item name and sort by date added in the db\n    item = session.query(\n        CatalogItem, Category).filter(and_(\n            Category.name == category_name,\n            CatalogItem.category_id == Category.id,\n            CatalogItem.name == item_name)).order_by(\n            desc(CatalogItem.date_added)).one()\n    return jsonify(Catalog_Item=item[0].serialize)\n\n\n@app.route('/category/JSON')\ndef categoriesJSON():\n    \"\"\"Return a json with all categories and their attributes\n\n    Returns:\n        A json of all categories\"\"\"\n    categories = session.query(Category).all()\n    return jsonify(categories=[r.serialize for r in categories])\n\n\n# Show all categories\n@app.route('/')\n@app.route('/category/')\ndef showCategories():\n    \"\"\"Show categories and items\n\n    Render the homepage.\n\n    Returns:\n        render the categories template if a GET request is sent\"\"\"\n    categories = session.query(Category).all()\n    items = session.query(\n        CatalogItem, Category).filter(\n        CatalogItem.category_id == Category.id).order_by(\n        desc(CatalogItem.date_added)).all()\n    return render_template('categories.html', categories=categories,\n                           items=items[:9])\n\n\n# Create a new category\n@app.route('/category/new/', methods=['GET', 'POST'])\ndef newCategory():\n    \"\"\"Create a category\n\n    Need to be logged in to be able to see the page.\n\n    Returns:\n        render the newCategory template if a GET request is sent\n        redirect to the home page if the POST request succeeds\"\"\"\n    if 'provider' in login_session:\n        form = FormCategory()\n        if form.validate_on_submit():\n            newCategory = Category(name=form.name.data,\n                                   image_loc=form.image_loc.data)\n            session.add(newCategory)\n            session.commit()\n            return redirect(url_for('showCategories'))\n        return render_template('newCategory.html', form=form)\n    else:\n        flash(\"Please login to be able to add a category\")\n        return redirect(url_for('showCategories'))\n\n\n@app.route('/category/<category_name>/edit/', methods=['GET', 'POST'])\ndef editCategory(category_name):\n    \"\"\"Edit a category\n\n    Need to be logged in to be able to see the edit page.\n\n    Args:\n        category_name(str): the name of the category to edit\n\n    Returns:\n        render the editCategory template if a GET request is sent\n        redirect to the home page if the POST request succeeds\"\"\"\n    if 'provider' in login_session:\n        editedCategory = session.query(\n            Category).filter_by(name=category_name).one()\n        form = FormCategory()\n        if form.validate_on_submit():\n            if len(form.name.data) > 0:\n                editedCategory.name = form.name.data\n            if len(form.image_loc.data) > 0:\n                editedCategory.image_loc = form.image_loc.data\n            
return redirect(url_for('showCategories'))\n return render_template('editCategory.html',\n category=editedCategory,\n form=form)\n else:\n flash(\"Please login to be able to edit a category\")\n return redirect(url_for('showCategories'))\n\n\n@app.route('/category/<category_name>/delete/', methods=['GET', 'POST'])\ndef deleteCategory(category_name):\n \"\"\"Delete a category\n\n Need to be logged in to be able to see the delete page.\n\n Args:\n category_name(str): the name of the category the object belongs to\n\n Returns:\n render the deleteCategory template if a GET request is sent\n redirect to the home page if the POST request succeeds\"\"\"\n if 'provider' in login_session:\n categoryToDelete = session.query(\n Category).filter_by(name=category_name).one()\n # itemsToDelete = session.query(CatalogItem).filter_by(\n # category_id=categoryToDelete.id).delete()\n if request.method == 'POST':\n session.delete(categoryToDelete)\n session.commit()\n return redirect(url_for('showCategories'))\n return render_template('deleteCategory.html',\n category=categoryToDelete)\n else:\n flash(\"Please login to be able to delete a category\")\n return redirect(url_for('showCategories'))\n\n\n@app.route('/category/<category_name>/')\ndef showCategory(category_name):\n \"\"\"Show a category and all its items\n\n Args:\n category_name(str): the name of the category the item belongs to\n\n Returns:\n render the catalog template\"\"\"\n category = session.query(Category).filter_by(name=category_name).one()\n categories = session.query(Category).all()\n items = session.query(CatalogItem).filter_by(\n category_id=category.id).all()\n nb_items = len(items)\n text_item = 'item'\n if nb_items != 1:\n text_item += 's'\n tuple_items = (nb_items, text_item)\n return render_template('catalog.html', items=items, category=category,\n categories=categories, tuple_items=tuple_items)\n\n\n@app.route('/category/<category_name>/<item_name>/')\ndef showCatalogItem(category_name, item_name):\n \"\"\"Show an item and all its attributes\n\n Args:\n category_name(str): the name of the category the item belongs to\n item_name(str): the name of the item\n\n Returns:\n render the catalogitem template\"\"\"\n item = session.query(CatalogItem).filter_by(name=item_name).one()\n category = session.query(Category).filter_by(name=category_name).one()\n return render_template('catalogitem.html', item=item,\n category=category)\n\n\n@app.route('/category/<category_name>/new/',\n methods=['GET', 'POST'])\ndef newCatalogItem(category_name):\n \"\"\"Create a new item in the database\n\n Args:\n category_name(str): the name of the category the item belongs to\n\n Returns:\n render the newcatalogitem template if a GET request is sent\n redirect to the category page if the POST request succeeds\"\"\"\n if 'provider' in login_session:\n form = FormItem()\n if form.validate_on_submit():\n category = session.query(Category).filter_by(\n name=category_name).one()\n newItem = CatalogItem(name=form.name.data,\n description=form.description.data,\n category_id=category.id,\n image_loc=form.image_loc.data)\n session.add(newItem)\n session.commit()\n return redirect(url_for('showCategory',\n category_name=category_name))\n return render_template('newcatalogitem.html',\n category_name=category_name, form=form)\n else:\n flash(\"Please login to be able to add a new item\")\n return redirect(url_for('showCategory', category_name=category_name))\n\n\n@app.route('/category/<category_name>/<item_name>/edit',\n methods=['GET', 'POST'])\ndef editCatalogItem(category_name, item_name):\n \"\"\"Edit the attributes of an item in the database\n\n Args:\n category_name(str): the name of the category the item belongs to\n item_name(str): the name of the item\n\n Returns:\n render the editcatalogitem template if a GET request is sent\n redirect to the category page if the POST request succeeds\"\"\"\n 
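# only fields the user actually filled in are overwritten below, so an\n # empty input leaves the stored value untouched\n 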
if 'provider' in login_session:\n editedItem = session.query(CatalogItem).filter_by(name=item_name).one()\n form = FormItem()\n if form.validate_on_submit():\n if form.name.data:\n editedItem.name = form.name.data\n if form.description.data:\n editedItem.description = form.description.data\n if form.image_loc.data:\n editedItem.image_loc = form.image_loc.data\n session.add(editedItem)\n session.commit()\n return redirect(url_for('showCategory',\n category_name=category_name))\n return render_template('editcatalogitem.html',\n category_name=category_name,\n item_name=item_name,\n item=editedItem,\n form=form)\n else:\n flash(\"Please login to be able to edit an item\")\n return redirect(url_for('showCategory', category_name=category_name))\n\n\n@app.route('/category/<category_name>/<item_name>/delete',\n methods=['GET', 'POST'])\ndef deleteCatalogItem(category_name, item_name):\n \"\"\"Deletes an item given a category and an item name\n Need to be logged in to be able to see the delete page.\n\n Args:\n category_name(str): the name of the category the object belongs to\n item_name(str): the name of the item\n\n Returns:\n render the deletecatalogitem template if a GET request is sent\n redirect to the category page if the POST request succeeds\"\"\"\n if 'provider' in login_session:\n itemToDelete = session.query(CatalogItem).filter_by(\n name=item_name).one()\n if request.method == 'POST':\n session.delete(itemToDelete)\n session.commit()\n return redirect(url_for('showCategory',\n category_name=category_name))\n else:\n return render_template('deletecatalogitem.html', item=itemToDelete,\n category_name=category_name)\n else:\n flash(\"Please login to be able to delete an item\")\n return redirect(url_for('showCategory', category_name=category_name))\n\n\nif __name__ == '__main__':\n # app.debug = True\n app.secret_key = 'super_secret_key'\n app.run(host='0.0.0.0', port=5000)\n","sub_path":"vagrant/catalog/catalogitem.py","file_name":"catalogitem.py","file_ext":"py","file_size_in_byte":22123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"327727893","text":"import obspy\nfrom glob import glob\nimport os\nimport pandas as pd\nimport numpy as np\n\n\nch = '2'\ndatadir = '/share/home/goxu/hao_shijie/sichuan/ZlandDEC/'\nfreq = '0.2to16.0_COR/'\n\n\nstack_dir = '/scratch/goxu/hao_shijie/mongolia/data_conti/stack/'\ndata_dir = '/scratch/goxu/hao_shijie/mongolia/data_conti/stack/stack_*.lst/'\nfor d in glob(data_dir):\n print(d)\n for fpath in glob(d + 'COR_*.SAC'):\n sac = os.path.basename(fpath)\n st0 = obspy.read(fpath)\n if any(np.isnan(st0[0].data)):\n print(\"nan!! 
%s\" % fpath)\n continue\n st0[0].data = st0[0].data / np.max(st0[0].data)\n if os.path.exists(stack_dir + sac):\n st1 = obspy.read(stack_dir + sac)\n st1[0].data += st0[0].data\n st1[0].write(stack_dir + sac)\n else:\n st0[0].write(stack_dir + sac)\n","sub_path":"Scripts/bin/stack_cor.py","file_name":"stack_cor.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"124127157","text":"__all__ = [\"show_record\"]\n\nfrom mantisshrimp.imports import *\nfrom mantisshrimp.data import *\nfrom .show_annotation import *\n\n\ndef show_record(\n record,\n label: bool = True,\n bbox: bool = True,\n mask: bool = True,\n ax: plt.Axes = None,\n show: bool = False,\n prepare_record=None,\n):\n data_preparer = prepare_record or default_prepare_record\n data = data_preparer(record)\n return show_annotation(\n data[\"img\"],\n labels=data[\"label\"] if (label and \"label\" in record) else None,\n bboxes=data[\"bbox\"] if (bbox and \"bbox\" in record) else None,\n masks=data[\"mask\"] if (mask and \"mask\" in record) else None,\n ax=ax,\n show=show,\n )\n","sub_path":"mantisshrimp/visualize/show_record.py","file_name":"show_record.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"126438797","text":"\"\"\"``kedro.framework.project`` module provides utitlity to\nconfigure a Kedro project and access its settings.\"\"\"\n# pylint: disable=redefined-outer-name,unused-argument,global-statement\nimport importlib\nimport operator\nfrom collections.abc import MutableMapping\nfrom typing import Dict, Optional\nfrom warnings import warn\n\nfrom dynaconf import LazySettings\nfrom dynaconf.validator import ValidationError, Validator\n\nfrom kedro.framework.hooks import get_hook_manager\nfrom kedro.framework.hooks.manager import _register_hooks, _register_hooks_setuptools\nfrom kedro.pipeline import Pipeline\n\n\ndef _get_default_class(class_import_path):\n module, _, class_name = class_import_path.rpartition(\".\")\n\n def validator_func(settings, validators):\n return getattr(importlib.import_module(module), class_name)\n\n return validator_func\n\n\nclass _IsSubclassValidator(Validator):\n \"\"\"A validator to check if the supplied setting value is a subclass of the default class\"\"\"\n\n def validate(self, settings, *args, **kwargs):\n super().validate(settings, *args, **kwargs)\n\n default_class = self.default(settings, self)\n for name in self.names:\n setting_value = getattr(settings, name)\n if not issubclass(setting_value, default_class):\n raise ValidationError(\n f\"Invalid value `{setting_value.__module__}.{setting_value.__qualname__}` \"\n f\"received for setting `{name}`. 
It must be a subclass of \"\n f\"`{default_class.__module__}.{default_class.__qualname__}`.\"\n )\n\n\nclass _ProjectSettings(LazySettings):\n \"\"\"Define all settings available for users to configure in Kedro,\n along with their validation rules and default values.\n Use Dynaconf's LazySettings as base.\n \"\"\"\n\n _CONF_ROOT = Validator(\"CONF_ROOT\", default=\"conf\")\n _HOOKS = Validator(\"HOOKS\", default=tuple())\n _CONTEXT_CLASS = Validator(\n \"CONTEXT_CLASS\",\n default=_get_default_class(\"kedro.framework.context.KedroContext\"),\n )\n _SESSION_STORE_CLASS = _IsSubclassValidator(\n \"SESSION_STORE_CLASS\",\n default=_get_default_class(\"kedro.framework.session.session.BaseSessionStore\"),\n )\n _SESSION_STORE_ARGS = Validator(\"SESSION_STORE_ARGS\", default={})\n _DISABLE_HOOKS_FOR_PLUGINS = Validator(\"DISABLE_HOOKS_FOR_PLUGINS\", default=tuple())\n\n def __init__(self, *args, **kwargs):\n\n kwargs.update(\n validators=[\n self._CONF_ROOT,\n self._HOOKS,\n self._CONTEXT_CLASS,\n self._SESSION_STORE_CLASS,\n self._SESSION_STORE_ARGS,\n self._DISABLE_HOOKS_FOR_PLUGINS,\n ]\n )\n super().__init__(*args, **kwargs)\n\n\ndef _load_data_wrapper(func):\n \"\"\"Wrap a method in _ProjectPipelines so that data is loaded on first access.\n Taking inspiration from dynaconf.utils.functional.new_method_proxy\n \"\"\"\n # pylint: disable=protected-access\n def inner(self, *args, **kwargs):\n self._load_data()\n return func(self._content, *args, **kwargs)\n\n return inner\n\n\nclass _ProjectPipelines(MutableMapping):\n \"\"\"A read-only lazy dictionary-like object to hold the project pipelines.\n On configure it will store the pipelines module.\n On first data access, e.g. through __getitem__, it will load the registered pipelines and merge\n them with pipelines defined from hooks.\n \"\"\"\n\n def __init__(self) -> None:\n self._pipelines_module: Optional[str] = None\n self._is_data_loaded = False\n self._content: Dict[str, Pipeline] = {}\n\n @staticmethod\n def _get_pipelines_registry_callable(pipelines_module: str):\n module_obj = importlib.import_module(pipelines_module)\n register_pipelines = getattr(module_obj, \"register_pipelines\")\n return register_pipelines\n\n def _load_data(self):\n \"\"\"Lazily read pipelines defined in the pipelines registry module\"\"\"\n\n # If the pipelines dictionary has not been configured with a pipelines module\n # or if data has been loaded\n if self._pipelines_module is None or self._is_data_loaded:\n return\n\n try:\n register_pipelines = self._get_pipelines_registry_callable(\n self._pipelines_module\n )\n except (ModuleNotFoundError, AttributeError) as exc:\n # for backwards compatibility with templates < 0.17.2\n # where no pipelines_registry is defined\n if self._pipelines_module in str(exc): # pragma: no cover\n project_pipelines = {}\n else:\n raise\n else:\n project_pipelines = register_pipelines()\n\n hook_manager = get_hook_manager()\n pipelines_dicts = (\n hook_manager.hook.register_pipelines() # pylint: disable=no-member\n )\n for pipeline_collection in pipelines_dicts:\n duplicate_keys = pipeline_collection.keys() & project_pipelines.keys()\n if duplicate_keys:\n warn(\n f\"Found duplicate pipeline entries. 
\"\n f\"The following will be overwritten: {', '.join(duplicate_keys)}\"\n )\n project_pipelines.update(pipeline_collection)\n\n self._content = project_pipelines\n self._is_data_loaded = True\n\n def configure(self, pipelines_module: str) -> None:\n \"\"\"Configure the pipelines_module to load the pipelines dictionary.\n Reset the data loading state so that after every `configure` call,\n data are reloaded.\n \"\"\"\n self._clear(pipelines_module)\n\n def _clear(self, pipelines_module: str) -> None:\n \"\"\"Helper method to clear the pipelines so new content will be reloaded\n next time data is accessed. Useful for testing purpose.\n \"\"\"\n self._is_data_loaded = False\n self._pipelines_module = pipelines_module\n\n # Dict-like interface\n __getitem__ = _load_data_wrapper(operator.getitem)\n __setitem__ = _load_data_wrapper(operator.setitem)\n __delitem__ = _load_data_wrapper(operator.delitem)\n __iter__ = _load_data_wrapper(iter)\n __len__ = _load_data_wrapper(len)\n\n # Presentation methods\n __repr__ = _load_data_wrapper(repr)\n __str__ = _load_data_wrapper(str)\n\n\nPACKAGE_NAME = None\n\nsettings = _ProjectSettings()\n\npipelines = _ProjectPipelines()\n\n\ndef configure_project(package_name: str):\n \"\"\"Configure a Kedro project by populating its settings with values\n defined in user's settings.py and pipeline_registry.py.\n \"\"\"\n settings_module = f\"{package_name}.settings\"\n settings.configure(settings_module)\n\n # set up all hooks so we can discover all pipelines\n hook_manager = get_hook_manager()\n _register_hooks(hook_manager, settings.HOOKS)\n _register_hooks_setuptools(hook_manager, settings.DISABLE_HOOKS_FOR_PLUGINS)\n\n pipelines_module = f\"{package_name}.pipeline_registry\"\n pipelines.configure(pipelines_module)\n\n # Once the project is successfully configured once, store PACKAGE_NAME as a\n # global variable to make it easily accessible. This is used by validate_settings()\n # below, and also by ParallelRunner on Windows, as package_name is required every\n # time a new subprocess is spawned.\n global PACKAGE_NAME\n PACKAGE_NAME = package_name\n\n\ndef validate_settings():\n \"\"\"Eagerly validate that the settings module is importable. This is desirable to\n surface any syntax or import errors early. In particular, without eagerly importing\n the settings module, dynaconf would silence any import error (e.g. 
missing\n dependency, missing/mislabelled pipeline), and users would instead get a cryptic\n error message ``Expected an instance of `ConfigLoader`, got `NoneType` instead``.\n More info on the dynaconf issue: https://github.com/rochacbruno/dynaconf/issues/460\n \"\"\"\n importlib.import_module(f\"{PACKAGE_NAME}.settings\")\n","sub_path":"kedro/framework/project/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"647152201","text":"#!/usr/local/bin/python3\n\nimport sys\nimport cnsts\n\nfrom board import check_board\n\ndef minimax(board, player):\n\t\"\"\" Uses the minimax algorithm to get the next best move\n Args:\n board (Board): the board in play\n player: the player a move must be generated for\n\t\t\t\n\t\tReturns:\n\t\t\tReturns a tuple of the form (best move, best score)\n\t\t\tIf best move is -1 then the board is complete and no move is possible\n\t\"\"\"\n\tplayer1, player2 = board.pieces\n\tpoints = {player1: 10, player2: -10, cnsts.DRAW: 0}\n\tbest_move = -1\n\tbest_score = -sys.maxsize if (player == player1) else sys.maxsize\n\twinner = check_board(board)\n\tif(winner == player1):\n\t\treturn (best_move, points[player1])\n\telif(winner == player2):\n\t\treturn (best_move, points[player2]) \n\telif(winner == cnsts.DRAW):\n\t\treturn (best_move, points[cnsts.DRAW])\n\telse:\n\t\tempty_positions = board.empty_spots()\n\t\tnumber_empty = len(empty_positions)\n\t\tfor i in range(0, number_empty):\n\t\t\tcurrent = empty_positions[i]\n\t\t\tx_coord, y_coord = current\n\t\t\tboard.add_move(player, x_coord, y_coord)\n\t\t\tif(player == player1):\n\t\t\t\tnew_move, new_score = minimax(board, player2)\n\t\t\t\tif(new_score > best_score):\n\t\t\t\t\tbest_score = new_score\n\t\t\t\t\tbest_move = current\n\t\t\telse:\n\t\t\t\tnew_move, new_score = minimax(board, player1)\n\t\t\t\tif(new_score < best_score):\n\t\t\t\t\tbest_score = new_score\n\t\t\t\t\tbest_move = current\n\n\t\t\tboard.reset_position(x_coord, y_coord)\n\n\t\treturn best_move, best_score\n","sub_path":"ai.py","file_name":"ai.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"274287452","text":"import os\nfrom unittest.case import TestCase\nfrom tests import BaseTestCase\nfrom mc_be.commons.scripts.acl.namespace import RegisterCommand, PatchCommand\nfrom mc_be import BASE_DIR\nfrom mc_be.blueprints.core.acl.services import permission_service\nfrom mc_be.blueprints.core.acl.models import RoleModel\nfrom werkzeug.exceptions import NotFound\n\n\nclass TestNamespaceCoreScript(BaseTestCase, TestCase):\n\n _multiprocess_shared_ = True\n\n def setUp(self):\n super(TestNamespaceCoreScript, self).setUp()\n self.register_command = RegisterCommand()\n self.patch_command = PatchCommand()\n\n def tearDown(self):\n super(TestNamespaceCoreScript, self).tearDown()\n with open(os.path.join(BASE_DIR, 'data', 'acl', 'core.yaml')) as f:\n self.register_command.run(file=f, drop=True, force_yes=True)\n\n def test_register_core(self):\n with open(os.path.join(BASE_DIR, 'data', 'acl', 'core.yaml')) as f:\n self.register_command.run(file=f, drop=True, force_yes=True)\n\n role = permission_service.load('admin-aggregator', namespace='core')\n self.assertIsInstance(role, RoleModel)\n\n def test_patch_core(self):\n with open(os.path.join(BASE_DIR, 'data', 'acl', 'core.yaml')) as f:\n 
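# register the pristine 'core' namespace first so the patch below runs\n # against a known baseline\n 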
self.register_command.run(file=f, drop=True, force_yes=True)\n\n with open(os.path.join(BASE_DIR, 'data', 'acl', 'smip', 'core.yaml')) as f:\n self.patch_command.run('core', f, drop=True, force_yes=True)\n\n with self.assertRaises(NotFound):\n permission_service.load('admin-aggregator', namespace='core')\n","sub_path":"mc-pybe-release-smip-R4/tests/commons/scripts/acl/tests_namespace.py","file_name":"tests_namespace.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"157899959","text":"from flask_babel import lazy_gettext as _ # noqa\nfrom wtforms import DecimalField as WtfDecimalFields\nfrom wtforms import IntegerField, SelectField, \\\n SelectMultipleField, StringField\nfrom wtforms.validators import Email, ValidationError, URL\nfrom wtforms.ext.sqlalchemy.fields import QuerySelectMultipleField\n\nfrom app.models.course import Course\nfrom app.models.education import Education\nfrom app.models.group import Group\n\n\nclass CustomFormSelectField(IntegerField):\n pass\n\n\nclass CourseSelectField(SelectField):\n def __init__(self, label='', validators=None, **kwargs):\n super(CourseSelectField, self).__init__(label, validators, **kwargs)\n self.coerce = int\n courses = Course.query.order_by(Course.name).all()\n self.choices = [(c.id, c.name) for c in courses]\n\n\nclass EducationSelectField(SelectField):\n def __init__(self, label='', validators=None, **kwargs):\n super(EducationSelectField, self).__init__(label, validators, **kwargs)\n self.coerce = int\n educations = Education.query.order_by(Education.name).all()\n self.choices = [(e.id, e.name) for e in educations]\n\n\nclass GroupSelectField(SelectField):\n def __init__(self, label='', validators=None, **kwargs):\n super(GroupSelectField, self).__init__(label, validators, **kwargs)\n self.coerce = int\n groups = Group.query.order_by(Group.name).all()\n self.choices = [(g.id, g.name) for g in groups]\n\n\nclass OrderedSelectMultipleField(SelectMultipleField):\n _choices_labels = None\n\n def iter_choices(self):\n if not self._choices_labels:\n self._choices_labels = {\n self.coerce(value): label for value, label in self.choices\n }\n\n if self.choices:\n selected = set(self.data) if self.data else set()\n unselected = set(value for value, _ in self.choices\n if value not in selected)\n\n for value in (self.data or []):\n yield (value, self._choices_labels[value], True)\n\n for value in unselected:\n yield (value, self._choices_labels[value], False)\n\n\nclass OrderedQuerySelectMultipleField(QuerySelectMultipleField):\n def _get_data(self):\n formdata = self._formdata\n if formdata is not None:\n data = []\n object_dict = {\n pk: obj for pk, obj in self._get_object_list()\n }\n\n for pk in formdata:\n if pk in object_dict:\n data.append(object_dict[pk])\n else:\n self._invalid_formdata = True\n\n self._set_data(data)\n\n return self._data\n\n data = property(_get_data, QuerySelectMultipleField._set_data)\n\n def process_formdata(self, valuelist):\n self._formdata = valuelist\n\n def iter_choices(self):\n objects = self._get_object_list()\n object_to_pk = {\n obj: pk for pk, obj in objects\n }\n\n all_objects = set(obj for _, obj in objects)\n selected_objects = set(self.data)\n\n for obj in self.data:\n if obj in object_to_pk:\n yield (object_to_pk[obj], self.get_label(obj), True)\n\n for obj in all_objects - selected_objects:\n if obj in object_to_pk:\n yield (object_to_pk[obj], self.get_label(obj), 
False)\n\n\nclass DecimalField(WtfDecimalFields):\n\n def process_formdata(self, valuelist):\n if valuelist:\n valuelist[0] = valuelist[0].replace(\",\", \".\")\n return super(DecimalField, self).process_formdata(valuelist)\n\n\nclass EmailListField(StringField):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._email_validator = Email()\n\n def pre_validate(self, form):\n origdata = self.data\n self.data += \"@svia.nl\"\n try:\n self._email_validator(form, self)\n # The current version of WTForms does not check for spaces\n # and multiple '@' characters.\n # this is fixed but not released yet, so we do it ourselves\n if \" \" in self.data or \"@\" in origdata:\n raise ValidationError()\n except ValidationError:\n raise ValidationError(_('Invalid email list name.'))\n finally:\n self.data = origdata\n\n def process_formdata(self, valuelist):\n super().process_formdata([d.strip().lower() for d in valuelist])\n\n\nclass EmailField(StringField):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._email_validator = Email()\n\n def pre_validate(self, form):\n self._email_validator(form, self)\n # The current version of WTForms does not check for spaces\n # this is fixed but not released yet, so we do it ourselves\n if \" \" in self.data:\n raise ValidationError(self.gettext('Invalid email address.'))\n\n super().pre_validate(form)\n\n def process_formdata(self, valuelist):\n super().process_formdata([d.strip().lower() for d in valuelist])\n\n\nclass URLList(URL):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def __call__(self, form, field):\n original_field_data = field.data\n\n try:\n for uri in [uri.strip() for uri in field.data.split(\",\")]:\n field.data = uri\n message = self.message\n if message is None:\n message = field.gettext('Invalid URL.')\n\n match = super(URL, self).__call__(form, field, message)\n if not self.validate_hostname(match.group('host')):\n raise ValidationError(message)\n finally:\n field.data = original_field_data\n","sub_path":"app/forms/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":5847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"375088712","text":"#!/usr/bin/env python\n\n\"\"\"\nProvides a context manager for talking to serial devices over term server\n\"\"\"\nfrom __future__ import (print_function, unicode_literals, division, absolute_import)\nimport socket\nfrom contextlib import contextmanager\n\n@contextmanager\ndef netdevice(host, port, timeout=5):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n s.settimeout(timeout)\n s.connect((host, port))\n yield s\n finally:\n s.close()\n","sub_path":"hcam_drivers/hardware/termserver.py","file_name":"termserver.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"144600060","text":"\"\"\"\nGiven the head of a Singly LinkedList, reverse the LinkedList. 
Write a function\nto return the new head of the reversed LinkedList.\n\"\"\"\n\n\ndef reverse(head):\n if not head or not head.next:\n return head\n\n prev, curr = None, head\n while curr:\n next_node = curr.next\n curr.next = prev\n prev = curr\n curr = next_node\n return prev\n","sub_path":"linked list/reverse and rotate/1.reverse_linked_list.py","file_name":"1.reverse_linked_list.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"487303905","text":"from collections import Counter\ndef is_palyndrome_permutation(string):\n counter = Counter()\n for char in string:\n counter[char] += 1\n \n find_odd = False\n \n for char in counter:\n if counter[char] % 2 == 1:\n if find_odd:\n return False\n find_odd = True\n return True\n \n \n","sub_path":"cracking_the_code/chapter_1/is_palyndrome_permutation.py","file_name":"is_palyndrome_permutation.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"313041605","text":"# -*- coding: utf-8 -*-\n\nfrom object_model.model import Model\n\nmodel=Model()\nmodel.create('mydev4.mdo')\nmodel.open('mydev4.mdo')\nmodel.set_unit('N_m_C')\n\nmodel.add_material('Q345B',7849,'isotropic_elastic',\n E=2e11,mu=0.3)\n#model.add_frame_section('1-L-O400x20','Q345B','O',[0.4,0.02])\nmodel.add_frame_section('1-L-H400x200x14x20','Q345B','I',[0.4,0.2,0.014,0.02])\n\nmodel.add_loadcase('S','static-linear',1.)\nmodel.add_loadcase('D','static-linear',0)\nmodel.add_loadcase('L','static-linear',0)\nmodel.add_loadcase('Modal','modal',0)\n\nf1=model.add_frame((0,0,0),(5,5,5),'1-L-H400x200x14x20')\nf2=model.add_frame((5,5,0),(5,5,5),'1-L-H400x200x14x20')\nf3=model.add_frame((5,5,5),(10,0,5),'1-L-H400x200x14x20')\n\n#model.merge_point(6)\n\nmodel.get_point_name_by_coor(z=0)\npt0=model.get_point_name_by_coor(0,0,0)[0]\npt1=model.get_point_name_by_coor(10,0,5)[0]\n\nmodel.set_point_restraint(pt0,[True]*6)\nmodel.set_point_load(pt1,'D',[0,0,-100000,0,0,0])\nmodel.set_point_load(pt1,'L',[0,0,-50000,0,0,0])\n\n#model.save()\nmodel.mesh()\nmodel.run(['S','D','L'])\n\n#print(model.get_result_point_reaction(pt0,'D'))\n#print(model.get_result_frame_force(f1,'D')[0][:6])\nprint(model.get_result_period('Modal'))\n\n#model.save()\n#model.close()\n\n#import test.beam_test as bt\n#bt.cantilever_beam_test()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"93515701","text":"# Lists examples\n# Create Empty Lists\narray1 = []\narray2 = []\n\n# Populate Lists\narray1.append(\"Monday\")\narray1.append(\"Tuesday\")\narray1.append(\"Wednesday\")\narray1.append(\"Thursday\")\narray2.append(\"Friday\")\narray2.append(\"Saturday\")\narray2.append(\"Sunday\")\n\n# Add lists together to create a new list, leaves the originals alone\n# arrayadded = array1 + array2\n\n# Concatenate Lists\narray1.extend(array2)\n\n#Remove list elements\n#array1.pop()\n\n# Loop Through and print\n# i = 0\n# while i < len(array1):\n# print(array1[i])\n# i += 1\n\n# # slice syntax\n# values[1:3] Index 1 up to (but not including) index 3.\n# values[2:-1] Index 2 up to (but not including) the last item.\n# values[:2] Start up to (but not including) index 2.\n# values[2:] Index 2 through end.\n# values[::2] Start through end, skipping ahead 2 places each time.\n# values[:] all values\n\n
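# e.g. with the seven day names above:\n# array1[1:3] -> ['Tuesday', 'Wednesday']\n# array1[-2:] -> ['Saturday', 'Sunday']\n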
# +---+---+---+---+---+---+---+\n# | M | T | W | T | F | S | S |\n# +---+---+---+---+---+---+---+\n# 0 1 2 3 4 5 6 7\n# x -7 -6 -5 -4 -3 -2 -1\n#Slice example\nslice = array1[:]\nprint (slice)\n#Get list length\nlenarray = len(array1)\nprint (lenarray)\n\n#unpack list into variable\nday1, day2, day3, day4, day5, day6, day7 = (array1)\n\nprint (day3)\n","sub_path":"python/lists.py","file_name":"lists.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"396007926","text":"import pygame, sys\nfrom paddle import Paddle\nfrom ball import Ball\nfrom brick import Brick\n\n\nclass ArkanoidGame(object):\n\n def __init__(self, width=800, height=600):\n # Game initialization\n pygame.init()\n pygame.font.init()\n self.tps_max = 100.0\n self.screen = pygame.display.set_mode((width, height))\n pygame.display.set_caption('Arkanoid')\n self.resolution = self.screen.get_size()\n self.tps_clock = pygame.time.Clock()\n self.tps_delta = 0.0\n self.play = False\n self.is_game_over = False\n self.bricks_rows = 3\n self.bricks_cols = self.bricks_rows + 1\n self.lives = 3\n\n # Game objects initialization\n self.player = Paddle(self)\n self.ball = Ball(self)\n self.bricks = []\n\n #Creating bricks (columns, rows)\n self.createBricks(self.bricks_cols, self.bricks_rows)\n\n # Game loop\n while not self.is_game_over:\n\n # Handle events\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit(0)\n elif event.type == pygame.KEYDOWN and not event.key == pygame.K_ESCAPE:\n self.play = True\n elif event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:\n sys.exit(0)\n\n # Ticking\n self.tps_delta += self.tps_clock.tick() / 1000.0\n while self.tps_delta > 1 / self.tps_max:\n self.tick()\n self.tps_delta -= 1 / self.tps_max\n\n # Rendering\n self.screen.fill((0, 0, 0))\n self.draw()\n pygame.display.flip()\n\n def tick(self):\n if self.play:\n self.player.tick()\n self.ball.tick()\n self.ball.bounceOffPaddle(self.player)\n for brick in self.bricks:\n if self.ball.isBouncedOffBrick(brick):\n self.bricks.remove(brick)\n if self.ball.pos.y > self.screen.get_size()[1]:\n if self.lives == 1:\n self.is_game_over = True\n else:\n self.lives -= 1\n self.resetLevel()\n if len(self.bricks) == 0:\n self.nextLevel()\n\n def draw(self):\n self.ball.draw()\n for brick in self.bricks:\n brick.draw()\n self.player.draw()\n myfont = pygame.font.SysFont('Comic Sans MS', 30)\n textsurface = myfont.render('Lives: '+str(self.lives), False, (255, 255, 255))\n self.screen.blit(textsurface, (self.resolution[0]*0.02, self.resolution[1]*0.95))\n\n def createBricks(self, cols=4, rows=3):\n w = self.screen.get_size()[0]\n h = self.screen.get_size()[1]\n dx = w/cols\n y = 0\n dy = (h/2)/rows\n while y <= h/2-1:\n x = 0\n while x <= w-1:\n self.bricks.append(Brick(self, x+dx/10, y+dy/8, (4*dx)/5, (3*dy)/4))\n x += dx\n y += dy\n\n def resetLevel(self):\n self.play = False\n self.is_game_over = False\n\n self.player = Paddle(self)\n self.ball = Ball(self)\n self.bricks = []\n self.createBricks(self.bricks_cols, self.bricks_rows)\n\n def nextLevel(self):\n self.bricks_rows += 1\n self.bricks_cols += 1\n if self.lives != 3:\n self.lives += 1\n self.resetLevel()\n\n\n# Running game\nif __name__ == \"__main__\":\n while True:\n ArkanoidGame()","sub_path":"arkanoid.py","file_name":"arkanoid.py","file_ext":"py","file_size_in_byte":3535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"495705399","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[5]:\n\n\nimport numpy as np\nfrom numpy import linalg as LA\n\nimport unittest\n\n\n# In[27]:\n\n\n
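# Forward-difference gradient: grad_i f(x) ~ (f(x + d*e_i) - f(x)) / d,\n# i.e. one extra function evaluation per dimension per step.\n\n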
class GradientDescent:\n \n \n def gradient(self,function,x,delta_val):\n \"\"\"\n function: a lambda function as the input to compute function(x) and function(x+delta)\n x: the input to the function, it can be a numpy array of any length\n delta_val: this is referring to the delta value in calculating the gradient,\n for example in 1 dimension delta := d in (f(x+d)-f(x))/d\n \"\"\"\n n=len(x)\n delta = delta_val*np.eye(n) # n-by-n matrix with delta_val on the diagonal, used to perturb one coordinate at a time\n \n return np.array([ (function(x+delta[i])-function(x))/delta_val for i in range(n)])\n \n def gradientDescent(self, function, initial_point, iterations=10000, learning_rate=0.1, delta_val=0.01, decay=None):\n if type(initial_point)!=np.ndarray:\n raise ValueError(\"Only accepting ndarrays as input, please update your function\")\n theta=initial_point\n for i in range(iterations):\n if(decay):\n # update the learning rate if a decay function was provided\n learning_rate=decay(learning_rate,i)\n theta=theta-learning_rate*self.gradient(function,theta,delta_val)\n return theta\n \n\n\n# In[48]:\n\n\nclass TestGD(unittest.TestCase):\n \n iterations=500\n efficiency_iterations=100000\n \n def f(self,x):\n return x[0]*x[0]+2*x[1]*x[1]\n \n def decay(self,learning_rate,i):\n return learning_rate*(10*i+1)/(10*i+2)\n \n def test_gradient(self):\n gd=GradientDescent()\n gradient=gd.gradient(self.f,[3,2],0.01)\n assert LA.norm(gradient-[6, 8]) < 0.03\n \n def test_gradientDescent(self):\n gd=GradientDescent()\n min_val=gd.gradientDescent(self.f,np.array([5,5]),self.iterations,0.1,0.0001)\n assert LA.norm(min_val-[0, 0]) < 0.03\n \n def test_gradientDescent_eff(self):\n gd=GradientDescent()\n min_val=gd.gradientDescent(self.f,np.array([20,20]),self.efficiency_iterations,0.1,0.0001)\n assert LA.norm(min_val-[0, 0]) < 0.03\n \n def test_ValueError(self):\n gd=GradientDescent()\n with self.assertRaises(ValueError) as context:\n min_val=gd.gradientDescent(self.f,5)\n \n def test_gradientDescent_decay(self):\n gd=GradientDescent()\n min_val=gd.gradientDescent(self.f,np.array([20,20]),self.iterations,0.9,0.0001,self.decay)\n assert LA.norm(min_val-[0, 0]) < 0.03 \n \n def regression_J_cost_function(self,th):\n X_train=np.array([[i] for i in range(40)])\n Y_train=np.array([[i] for i in range(40)])\n m=X_train.shape[0]\n th=th.reshape([1,1])\n return ((X_train@th.T-Y_train).T@(X_train@th.T-Y_train))[0][0]/(2*m) \n \n def test_against_regression(self):\n theta_0=np.array([0 for i in range(1)])\n gd=GradientDescent()\n theta=gd.gradientDescent(self.regression_J_cost_function, theta_0,iterations=500,learning_rate=0.0001,delta_val=0.0001)\n assert LA.norm(theta-[1]) < 0.03 \n \nif __name__ == '__main__':\n unittest.main(argv=['first-arg-is-ignored'], exit=False)\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"GradientDescent.py","file_name":"GradientDescent.py","file_ext":"py","file_size_in_byte":3338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"332217262","text":"from bglib.scrapers import *\n\nclass FourGames(GenericScraper):\n def __init__(self):\n\n self.startpagenr = 2\n self.url = 'https://4-games.se/produkt-kategori/spel/page/%d/'\n self.firstpageurl = 'https://4-games.se/produkt-kategori/spel/'\n\n self.gamesoup = {'name': {'funcs': (lambda x: x.find(\"h2\").text,\\\n lambda x: ' '.join(re.split('\\s+', x, flags=re.UNICODE)))},\\\n 
'price': {'funcs': (lambda x: x.find(\"span\", {\"class\": \"price\"}).text.replace(\"kr\", \"\" ).replace(\",\", \"\").strip(),)},\\\n 'stock':{'funcs': (lambda x: x.find(\"a\", {\"class\": \"button add_to_cart_button product_type_simple\"}) is not None,)}}\n self.gamelistsoup = {'funcs':(lambda x: x.find_all(\"li\", {\"class\": lambda L: L and L.find('product type-product') > -1 }),)}\n\n self.pagemaxnr = {'funcs':(lambda x: int(x.find('nav', {'class': 'woocommerce-pagination'}).find_all('a')[-2].text),)}\n self.parsetype = 'soup'\n super().__init__()\n","sub_path":"stores/fourgames.py","file_name":"fourgames.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"495166845","text":"# coding: utf-8\n\nfrom matplotlib import cm\nfrom matplotlib import pyplot as plt\nfrom scipy.stats import norm\nimport numpy as np\nimport Cpip\nimport pandas as pd\nimport time\n\nX = np.load(\"../data/IDS-2017-feature.npy\")\n\nmaxAE = 10\nFMgrace = 50000\nADgrace = 150000\nLSTMgrace = 100000\n\nrms = []\nlts = []\nprint(\"Running CPIP:\")\nstart = time.time()\nC = Cpip.CPIP(X.shape[1], FMgrace, ADgrace, LSTMgrace)\n\n\nfor i in range(X.shape[0]):\n if (i+1) % 1000 == 0:\n print(i+1)\n rm, lt = C.process(X[i, ])\n rms.append(rm)\n lts.append(lt)\nstop = time.time()\nprint(\"Complete. Time elapsed: \" + str(stop - start))\n\nprms = np.array(rms[200000:])\nplts = np.array(lts[200000:])\nscores = np.zeros(400000)\n\nscores[:100000] = 2 * np.exp(10 * prms[:100000])\nscores[100000:] = np.exp(10 * prms[100000:]) + np.exp(10 * plts[100000:])\nindex = np.array(range(len(scores)))\nbenignSample = np.log(scores[:50000])\nlogProbs = norm.logsf(np.log(scores), np.mean(\n benignSample), np.std(benignSample))\n\nfig3 = plt.figure(figsize=(12.8, 6.4))\nplt.scatter(index, scores, s=4,\n c=logProbs, cmap='RdYlGn')\nplt.ylim([min(scores), max(scores)+1.5])\nplt.annotate('Normal Traffic', xy=(index[26000], 3), xytext=(\n index[0], max(scores)), arrowprops=dict(facecolor='black', shrink=0.005), fontsize='large')\nplt.annotate('DDoS Attack Traffic', xy=(index[100000], max(scores)), xytext=(\n index[0], max(scores)+1), arrowprops=dict(facecolor='black', shrink=0.005), fontsize='large')\n\nplt.xlabel(\"indexs of packets\")\nplt.ylabel(\"anomaly score\")\n\n\nplt.savefig(\"./result.png\")\nplt.show()\nC.LSTM.save(\"../LSTM_demo/lstm.h5\")\n\n\n","sub_path":"IDS2017.py","file_name":"IDS2017.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"588075039","text":"# Class-based application configuration\nclass Config(object):\n \"\"\" Flask application config \"\"\"\n\n # Flask settings\n SECRET_KEY = 'This is an INSECURE secret!! DO NOT use this in production!!'\n\n # Flask-MongoEngine settings\n MONGODB_SETTINGS = {\n 'db': 'tst_app',\n 'host': 'mongodb://localhost:27017/tst_app'\n }\n","sub_path":"smart_sentry/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"256106800","text":"\"\"\"\nOrderedSelectors contains GroupSelector classes of defined order of selection.\n\n.. 
inheritance-diagram:: fullrmc.Selectors.OrderedSelectors\n :parts: 1\n\"\"\"\n\n# standard libraries imports\n\n# external libraries imports\nimport numpy as np\n\n# fullrmc imports\nfrom fullrmc.Globals import INT_TYPE, FLOAT_TYPE, LOGGER\nfrom fullrmc.Core.Collection import is_integer, is_number\nfrom fullrmc.Core.GroupSelector import GroupSelector\n\n\nclass DefinedOrderSelector(GroupSelector):\n \"\"\"\n DefinedOrderSelector is a group selector with a defined order of selection.\n \n :Parameters:\n #. engine (None, fullrmc.Engine): The selector RMC engine.\n #. order (None, list, set, tuple, numpy.ndarray): The selector order of groups.\n If None, order is set automatically to all groups indexes list.\n \"\"\"\n \n def __init__(self, engine, order=None):\n # initialize GroupSelector\n super(DefinedOrderSelector, self).__init__(engine=engine)\n # set order\n self.set_order(order)\n # initialize selector\n self.__initialize_selector__()\n \n def __initialize_selector__(self):\n if self.__order is None:\n self.__index = None\n else:\n self.__index = 0\n \n def _runtime_initialize(self):\n \"\"\" \n Automatically sets the selector order at the engine runtime.\n \"\"\"\n assert self.engine is not None, LOGGER.error(\"engine must be set prior to calling _runtime_initialize\")\n if self.__order is None:\n self.__order = np.array(range(len(self.engine.groups)), dtype=INT_TYPE)\n self.__initialize_selector__()\n \n @property\n def order(self):\n \"\"\" List copy of the order of selection.\"\"\"\n if self.__order is None:\n order = None\n else:\n order = list(self.__order)\n return order\n \n @property\n def index(self):\n \"\"\"The current selection index.\"\"\"\n return self.__index\n \n 
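# usage sketch (assuming a configured fullrmc Engine instance):\n # DefinedOrderSelector(engine, order=[0, 2, 1]) visits groups 0, 2 and 1\n # in that fixed, repeating order\n 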
def set_order(self, order):\n \"\"\"\n Set selector groups order.\n \n :Parameters:\n #. order (None, list, set, tuple, numpy.ndarray): The selector order of groups.\n \"\"\"\n if order is None:\n newOrder = None\n else:\n assert isinstance(order, (list, set, tuple, np.ndarray)), LOGGER.error(\"order must be an instance among list, set, tuple or numpy.ndarray\")\n if isinstance(order, np.ndarray):\n assert len(order.shape)==1, LOGGER.error(\"order numpy.ndarray must have one dimension\")\n order = list(order)\n assert len(order)>0, LOGGER.error(\"order can't be empty\")\n newOrder = []\n for idx in order:\n assert is_integer(idx), LOGGER.error(\"order indexes must be integers\")\n idx = int(idx)\n assert idx>=0, LOGGER.error(\"order indexes must be positive\")\n assert idx<len(self.engine.groups), LOGGER.error(\"order indexes must be smaller than the engine's number of groups\")\n newOrder.append(idx)\n newOrder = np.array(newOrder, dtype=INT_TYPE)\n # set order\n self.__order = newOrder\n \n def select_index(self):\n \"\"\"\n Select a group index.\n \n :Returns:\n #. index (integer): the selected group index in the engine groups list\n \"\"\"\n # get the group index at the current position in the order\n groupIndex = self.__order[self.__index]\n # advance to the next position, wrapping around cyclically\n self.__index = (self.__index+1)%len(self.__order)\n return groupIndex\n\n\nclass DirectionalOrderSelector(DefinedOrderSelector):\n \"\"\"\n DirectionalOrderSelector is a group selector with the order of selection\n computed from the distance between every group's geometric center and a\n given center point, selecting groups either from the furthest to the\n closest (expand) or from the closest to the furthest.\n \n :Parameters:\n #. engine (None, fullrmc.Engine): The selector RMC engine.\n #. center (None, list, set, tuple, numpy.ndarray): The center of expansion.\n If None, the center is automatically set to the origin (0,0,0).\n #. expand (bool): Whether to set the order from the furthest to the closest,\n or from the closest to the furthest if set to False.\n #. adjustMoveGenerators (bool): If set to True, all groups move generator\n instances will be replaced at engine runtime with a MoveGeneratorCollector\n of TranslationTowardsCenterGenerator and RotationGenerator.\n #. generatorsParams (None, dict): The automatically created move generators\n parameters. Only 'TG' and 'RG' keys are allowed.\n \"\"\"\n \n def __init__(self, engine, center=None, expand=True,\n adjustMoveGenerators=False,\n generatorsParams={\"TG\":{\"amplitude\":0.1, \"damping\":0.1, \"angle\":90},\n \"RG\":{\"amplitude\":10}}):\n # initialize GroupSelector\n super(DirectionalOrderSelector, self).__init__(engine=engine, order=None)\n # set center\n self.set_center(center)\n # set expand\n self.set_expand(expand) \n # set adjust move generators flag\n self.set_adjust_move_generators(adjustMoveGenerators) \n # set generators parameters\n self.set_generators_parameters(generatorsParams) \n \n def _runtime_initialize(self):\n \"\"\" \n Automatically sets the selector order at the engine runtime.\n \"\"\"\n diffs = np.array([(np.sum(self.engine.realCoordinates[g.indexes], axis=0)/len(g))-self.__center for g in self.engine.groups], dtype=FLOAT_TYPE)\n dists = np.array([np.sqrt(np.add.reduce(diff**2)) for diff in diffs])\n order = np.argsort(dists).astype(INT_TYPE)\n if self.__expand:\n order = [o for o in reversed(order)]\n # set order\n self.set_order(order)\n # set groups move generators\n if self.__adjustMoveGenerators:\n from fullrmc.Core.MoveGenerator import MoveGeneratorCollector\n from fullrmc.Generators.Rotations import RotationGenerator\n from fullrmc.Generators.Translations import TranslationTowardsCenterGenerator\n TG_amp = self.__generatorsParams['TG']['amplitude']\n TG_ang = self.__generatorsParams['TG']['angle']\n TG_dam = self.__generatorsParams['TG']['damping']\n RG_ang = self.__generatorsParams['RG']['amplitude']\n maxDist = FLOAT_TYPE(np.max(dists))\n TG_ampInterval = TG_amp-TG_amp*TG_dam\n for idx in range(len(self.engine.groups)):\n g = self.engine.groups[idx]\n damping = ((maxDist-dists[idx])/maxDist)*TG_ampInterval\n coll = [TranslationTowardsCenterGenerator(center={\"fixed\":self.__center}, amplitude=TG_amp-damping, angle=TG_ang, direction=not self.__expand)]\n if len(g) > 1:\n coll.append(RotationGenerator(amplitude=RG_ang))\n mg = MoveGeneratorCollector(collection=coll, randomize=True)\n g.set_move_generator( mg )\n \n @property\n def expand(self):\n \"\"\" expand flag.\"\"\"\n return self.__expand\n \n @property\n def center(self):\n \"\"\" center (X,Y,Z) coordinates.\"\"\"\n return self.__center\n \n @property\n def adjustMoveGenerators(self):\n \"\"\" adjustMoveGenerators flag.\"\"\"\n return self.__adjustMoveGenerators \n \n @property\n def generatorsParams(self):\n \"\"\" Automatic generators parameters.\"\"\"\n return self.__generatorsParams \n \n 
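# ordering sketch: group centroids are ranked by distance to the center;\n # with expand=True the furthest group comes first, so the configuration\n # relaxes outward from the center point\n 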
def set_generators_parameters(self, generatorsParams):\n \"\"\"\n Set move generators parameters.\n \n :Parameters:\n #. generatorsParams (None, dict): The automatically created moves generators parameters.\n If None is given, default parameters are used. If a dictionary is given, only two keys are allowed.\n 'TG' key is for TranslationTowardsCenterGenerator parameters and 'RG' key is\n for RotationGenerator parameters. TranslationTowardsCenterGenerator amplitude parameter\n is not the same for all groups but intelligently allows certain groups to move more than\n others according to the damping parameter.\n \n **Parameters are the following:**\\n\n * TG_amp = generatorsParams['TG']['amplitude']: Used for TranslationTowardsCenterGenerator amplitude parameters.\n * TG_ang = generatorsParams['TG']['angle']: Used as TranslationTowardsCenterGenerator angle parameters.\n * TG_dam = generatorsParams['TG']['damping']: Also used for TranslationTowardsCenterGenerator amplitude parameters.\n * RG_ang = generatorsParams['RG']['amplitude']: Used as RotationGenerator angle parameters.\n \n **Parameters are used as the following:**\\n\n * TG = TranslationTowardsCenterGenerator(center={\"fixed\":center}, amplitude=AMPLITUDE, angle=TG_ang)\\n\n Where TG_amp*TG_dam <= AMPLITUDE <= TG_amp\n * RG = RotationGenerator(amplitude=RG_ang) \n * MoveGeneratorCollector(collection=[TG,RG], randomize=True)\n \n **NB: The parameters are not checked for errors until engine runtime.** \n \"\"\"\n if generatorsParams is None:\n generatorsParams = {}\n assert isinstance(generatorsParams, dict), LOGGER.error(\"generatorsParams must be a python dictionary\")\n newGenParams = {\"TG\":{\"amplitude\":0.1, \"damping\":0.1, \"angle\":90},\n \"RG\":{\"amplitude\":10}}\n # update TranslationTowardsCenterGenerator values\n for gkey in newGenParams.keys():\n if not generatorsParams.has_key(gkey):\n continue\n assert isinstance(generatorsParams[gkey], dict), LOGGER.error(\"generatorsParams value must be a python dictionary\")\n for key in newGenParams[gkey].keys():\n newGenParams[gkey][key] = generatorsParams[gkey].get(key, newGenParams[gkey][key])\n # check the merged damping parameter (checking newGenParams avoids a\n # KeyError when a partial or empty dictionary is passed)\n assert is_number(newGenParams[\"TG\"][\"damping\"]), LOGGER.error(\"generatorsParams['TG']['damping'] must be a number\")\n newGenParams[\"TG\"][\"damping\"] = FLOAT_TYPE(newGenParams[\"TG\"][\"damping\"])\n assert newGenParams[\"TG\"][\"damping\"]>=0, LOGGER.error(\"generatorsParams['TG']['damping'] must be bigger than 0\")\n assert newGenParams[\"TG\"][\"damping\"]<=1, LOGGER.error(\"generatorsParams['TG']['damping'] must be smaller than 1\")\n # set generatorsParams\n self.__generatorsParams = newGenParams \n \n def set_center(self, center):\n \"\"\"\n Set the center.\n \n :Parameters:\n #. 
center (None, list, set, tuple, numpy.ndarray): The center of expansion.\n If None, the center is automatically set to the origin (0,0,0).\n \"\"\"\n if center is None:\n center = np.array((0,0,0), dtype=FLOAT_TYPE)\n else:\n assert isinstance(center, (list, set, tuple, np.ndarray)), LOGGER.error(\"center must be an instance among list, set, tuple or numpy.ndarray\")\n if isinstance(center, np.ndarray):\n assert len(center.shape)==1, LOGGER.error(\"center numpy.ndarray must have one dimension\")\n center = list(center)\n assert len(center) == 3, LOGGER.error(\"center must have exactly three items\")\n assert is_number(center[0]), LOGGER.error(\"center items must be numbers\")\n assert is_number(center[1]), LOGGER.error(\"center items must be numbers\")\n assert is_number(center[2]), LOGGER.error(\"center items must be numbers\")\n center = np.array(([float(c) for c in center]), dtype=FLOAT_TYPE)\n # set center\n self.__center = center\n\n 
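# usage sketch: DirectionalOrderSelector(engine, center=(0,0,0), expand=True,\n # adjustMoveGenerators=True) picks the furthest groups first and biases\n # their translations away from the center, expanding the configuration\n 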
\n \"\"\" \n assert isinstance(adjustMoveGenerators, bool), LOGGER.error(\"adjustMoveGenerators must be boolean\")\n self.__adjustMoveGenerators = adjustMoveGenerators\n \n ","sub_path":"Selectors/OrderedSelectors.py","file_name":"OrderedSelectors.py","file_ext":"py","file_size_in_byte":15191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"407829726","text":"import numpy as np\r\nimport itertools\r\nimport scipy.spatial as spspatial\r\nimport scipy.sparse as spsparse\r\n\r\ndef simpvol(p, t):\r\n \"\"\"Signed volumes of the simplex elements in the mesh.\"\"\"\r\n dim = p.shape[1]\r\n if dim == 1:\r\n d01 = p[t[:,1]]-p[t[:,0]]\r\n return d01\r\n elif dim == 2:\r\n d01 = p[t[:,1]]-p[t[:,0]]\r\n d02 = p[t[:,2]]-p[t[:,0]]\r\n return (d01[:,0]*d02[:,1]-d01[:,1]*d02[:,0])/2\r\n else:\r\n raise NotImplementedError\r\n \r\ndef simpqual(p, t):\r\n \"\"\"Simplex quality.\r\n\r\n Usage\r\n -----\r\n q = simpqual(p, t)\r\n\r\n Parameters\r\n ----------\r\n p : array, shape (np, dim)\r\n nodes\r\n t : array, shape (nt, dim+1)\r\n triangulation\r\n\r\n Returns\r\n -------\r\n q : array, shape (nt, )\r\n qualities\r\n \"\"\"\r\n assert (p.ndim == 2\r\n and t.ndim == 2\r\n and p.shape[1]+1 == t.shape[1])\r\n\r\n dim = p.shape[1]\r\n if dim == 1:\r\n return np.ones((1, nt))\r\n\r\n elif dim == 2:\r\n length = lambda p1: np.sqrt((p1**2).sum(1))\r\n a = length(p[t[:,1]]-p[t[:,0]])\r\n b = length(p[t[:,2]]-p[t[:,0]])\r\n c = length(p[t[:,2]]-p[t[:,1]])\r\n r = 0.5*np.sqrt((b+c-a)*(c+a-b)*(a+b-c)/(a+b+c))\r\n R = a*b*c/np.sqrt((a+b+c)*(b+c-a)*(c+a-b)*(a+b-c))\r\n return 2*r/R\r\n\r\n else:\r\n raise NotImplementedError\r\n \r\n\r\n\r\ndef fixmesh(p, t, ptol=2e-13):\r\n \"\"\"Remove duplicated/unused nodes and fix element orientation.\r\n\r\n Parameters\r\n ----------\r\n p : array, shape (np, dim)\r\n t : array, shape (nt, nf)\r\n\r\n Usage\r\n -----\r\n p, t = fixmesh(p, t, ptol)\r\n \"\"\"\r\n snap = (p.max(0)-p.min(0)).max()*ptol\r\n _, ix, jx = unique_rows(np.round(p/snap)*snap, True, True)\r\n\r\n p = p[ix]\r\n t = jx[t]\r\n\r\n flip = simpvol(p,t)<0\r\n t[flip, :2] = t[flip, 1::-1]\r\n\r\n return p, t\r\n\r\n\r\ndef dense(I, J, S, shape=None, dtype=None):\r\n \"\"\"\r\n Similar to MATLAB's SPARSE(I, J, S, ...), but instead returning a\r\n dense array.\r\n\r\n Usage\r\n -----\r\n >>> shape = (m, n)\r\n >>> A = dense(I, J, S, shape, dtype)\r\n \"\"\"\r\n\r\n # Advanced usage: allow J and S to be scalars.\r\n if np.isscalar(J):\r\n x = J\r\n J = np.empty(I.shape, dtype=int)\r\n J.fill(x)\r\n if np.isscalar(S):\r\n x = S\r\n S = np.empty(I.shape)\r\n S.fill(x)\r\n\r\n # Turn these into 1-d arrays for processing.\r\n S = S.flat; I = I.flat; J = J.flat\r\n return spsparse.coo_matrix((S, (I, J)), shape, dtype).toarray()\r\n \r\n\r\n\r\ndef setdiff_rows(A, B, return_index=False):\r\n \"\"\"\r\n Similar to MATLAB's setdiff(A, B, 'rows'), this returns C, I\r\n where C are the row of A that are not in B and I satisfies\r\n C = A[I,:].\r\n\r\n Returns I if return_index is True.\r\n \"\"\"\r\n A = np.require(A, requirements='C')\r\n B = np.require(B, requirements='C')\r\n\r\n assert A.ndim == 2, \"array must be 2-dim'l\"\r\n assert B.ndim == 2, \"array must be 2-dim'l\"\r\n assert A.shape[1] == B.shape[1], \\\r\n \"arrays must have the same number of columns\"\r\n assert A.dtype == B.dtype, \\\r\n \"arrays must have the same data type\"\r\n\r\n # NumPy provides setdiff1d, which operates only on one dimensional\r\n # arrays. 
To make the array one-dimensional, we interpret each row\r\n # as being a string of characters of the appropriate length.\r\n orig_dtype = A.dtype\r\n ncolumns = A.shape[1]\r\n dtype = np.dtype((np.character, orig_dtype.itemsize*ncolumns))\r\n C = np.setdiff1d(A.view(dtype), B.view(dtype)) \\\r\n .view(A.dtype) \\\r\n .reshape((-1, ncolumns), order='C')\r\n if return_index:\r\n raise NotImplementedError\r\n else:\r\n return C\r\n\r\ndef unique_rows(A, return_index=False, return_inverse=False):\r\n \"\"\"\r\n Similar to MATLAB's unique(A, 'rows'), this returns B, I, J\r\n where B is the unique rows of A and I and J satisfy\r\n A = B[J,:] and B = A[I,:]\r\n\r\n Returns I if return_index is True\r\n Returns J if return_inverse is True\r\n \"\"\"\r\n A = np.require(A, requirements='C')\r\n assert A.ndim == 2, \"array must be 2-dim'l\"\r\n\r\n orig_dtype = A.dtype\r\n ncolumns = A.shape[1]\r\n dtype = np.dtype((np.character, orig_dtype.itemsize*ncolumns))\r\n B, I, J = np.unique(A.view(dtype),\r\n return_index=True,\r\n return_inverse=True)\r\n\r\n B = B.view(orig_dtype).reshape((-1, ncolumns), order='C')\r\n\r\n # There must be a better way to do this:\r\n if (return_index):\r\n if (return_inverse):\r\n return B, I, J\r\n else:\r\n return B, I\r\n else:\r\n if (return_inverse):\r\n return B, J\r\n else:\r\n return B\r\n\r\n\r\ndef distmesh2d(fd, fh, h0, bbox, pfix=None, fig='gcf'):\r\n \"\"\"\r\n distmesh2d: 2-D Mesh Generator using Distance Functions.\r\n\r\n Usage\r\n -----\r\n >>> p, t = distmesh2d(fd, fh, h0, bbox, pfix)\r\n\r\n Parameters\r\n ----------\r\n fd: Distance function d(x,y)\r\n fh: Scaled edge length function h(x,y)\r\n h0: Initial edge length\r\n bbox: Bounding box, (xmin, ymin, xmax, ymax)\r\n pfix: Fixed node positions, shape (nfix, 2)\r\n fig: Figure to use for plotting, or None to disable plotting.\r\n\r\n Returns\r\n -------\r\n p: Node positions (Nx2)\r\n t: Triangle indices (NTx3)\r\n\r\n Example: (Uniform Mesh on Unit Circle)\r\n >>> fd = lambda p: sqrt((p**2).sum(1))-1.0\r\n >>> p, t = distmesh2d(fd, huniform, 2, (-1,-1,1,1))\r\n\r\n Example: (Rectangle with circular hole, refined at circle boundary)\r\n >>> fd = lambda p: ddiff(drectangle(p,-1,1,-1,1), dcircle(p,0,0,0.5))\r\n >>> fh = lambda p: 0.05+0.3*dcircle(p,0,0,0.5)\r\n >>> p, t = distmesh2d(fd, fh, 0.05, (-1,-1,1,1),\r\n [(-1,-1), (-1,1), (1,-1), (1,1)])\r\n\r\n Example: (Polygon)\r\n >>> pv=[(-0.4, -0.5), (0.4, -0.2), (0.4, -0.7), (1.5, -0.4), (0.9, 0.1),\r\n (1.6, 0.8), (0.5, 0.5), (0.2, 1.0), (0.1, 0.4), (-0.7, 0.7),\r\n (-0.4, -0.5)]\r\n >>> fd = lambda p: dpoly(p, pv)\r\n >>> p, t = distmesh2d(fd, huniform, 0.1, (-1,-1, 2,1), pv)\r\n\r\n Example: (Ellipse)\r\n >>> fd = lambda p: p[:,0]**2/2**2 + p[:,1]**2/1**2 - 1\r\n >>> p, t = dm.distmesh2d(fd, dm.huniform, 0.2, (-2,-1, 2,1))\r\n\r\n Example: (Square, with size function point and line sources)\r\n >>> fd = lambda p: dm.drectangle(p,0,1,0,1)\r\n >>> fh = lambda p: np.minimum(np.minimum(\r\n 0.01+0.3*abs(dm.dcircle(p,0,0,0)),\r\n 0.025+0.3*abs(dm.dpoly(p,[(0.3,0.7),(0.7,0.5)]))), 0.15)\r\n >>> p, t = dm.distmesh2d(fd, fh, 0.01, (0,0,1,1), [(0,0),(1,0),(0,1),(1,1)])\r\n\r\n Example: (NACA0012 airfoil)\r\n >>> hlead=0.01; htrail=0.04; hmax=2; circx=2; circr=4\r\n >>> a=.12/.2*np.array([0.2969,-0.1260,-0.3516,0.2843,-0.1036])\r\n >>> a0=a[0]; a1=np.hstack((a[5:0:-1], 0.0))\r\n >>> fd = lambda p: dm.ddiff(\r\n dm.dcircle(p,circx,0,circr),\r\n (abs(p[:,1])-np.polyval(a1, p[:,0]))**2-a0**2*p[:,0])\r\n >>> fh = lambda p: np.minimum(np.minimum(\r\n 
hlead+0.3*dm.dcircle(p,0,0,0),\r\n htrail+0.3*dm.dcircle(p,1,0,0)), hmax)\r\n\r\n >>> fixx = 1.0-htrail*np.cumsum(1.3**np.arange(5))\r\n >>> fixy = a0*np.sqrt(fixx)+np.polyval(a1, fixx)\r\n >>> fix = np.vstack((\r\n np.array([(circx-circr,0),(circx+circr,0),\r\n (circx,-circr),(circx,circr),\r\n (0,0),(1,0)]),\r\n np.vstack((fixx, fixy)).T,\r\n np.vstack((fixx, -fixy)).T))\r\n >>> box = (circx-circr,-circr, circx+circr,circr)\r\n >>> h0 = min(hlead, htrail, hmax)\r\n >>> p, t = dm.distmesh2d(fd, fh, h0, box, fix)\r\n \"\"\"\r\n\r\n dptol=.001; ttol=.1; Fscale=1.2; deltat=.2; geps=.001*h0;\r\n deps=np.sqrt(np.finfo(np.double).eps)*h0;\r\n densityctrlfreq=30;\r\n\r\n # Extract bounding box\r\n xmin, ymin, xmax, ymax = bbox\r\n if pfix is not None:\r\n pfix = np.array(pfix, dtype='d')\r\n\r\n # 1. Create initial distribution in bounding box (equilateral triangles)\r\n x, y = np.mgrid[xmin:(xmax+h0):h0,\r\n ymin:(ymax+h0*np.sqrt(3)/2):h0*np.sqrt(3)/2]\r\n x[:, 1::2] += h0/2 # Shift even rows\r\n p = np.vstack((x.flat, y.flat)).T # List of node coordinates\r\n\r\n # 2. Remove points outside the region, apply the rejection method\r\n p = p[fd(p)<geps] # Keep only d<0 points\r\n r0 = 1/fh(p)**2 # Probability to keep point\r\n p = p[np.random.random(p.shape[0])<r0/r0.max()] # Rejection method\r\n if pfix is not None:\r\n p = setdiff_rows(p, pfix) # Remove duplicated nodes\r\n pfix = unique_rows(pfix); nfix = pfix.shape[0]\r\n p = np.vstack((pfix, p)) # Prepend fix points\r\n else:\r\n nfix = 0\r\n N = p.shape[0] # Number of points N\r\n\r\n count = 0\r\n pold = float('inf') # For first iteration\r\n\r\n while True:\r\n count += 1\r\n\r\n # 3. Retriangulation by the Delaunay algorithm\r\n dist = lambda p1, p2: np.sqrt(((p1-p2)**2).sum(1))\r\n if (dist(p, pold)/h0).max() > ttol: # Any large movement?\r\n pold = p.copy() # Save current positions\r\n t = spspatial.Delaunay(p).vertices # List of triangles\r\n pmid = p[t].sum(1)/3 # Compute centroids\r\n t = t[fd(pmid) < -geps] # Keep interior triangles\r\n # 4. Describe each bar by a unique pair of nodes\r\n bars = np.vstack((t[:, [0,1]],\r\n t[:, [1,2]],\r\n t[:, [2,0]])) # Interior bars duplicated\r\n bars.sort(axis=1)\r\n bars = unique_rows(bars) # Bars as node pairs\r\n # 5. Graphical output of the current mesh\r\n\r\n # 6. Move mesh points based on bar lengths L and forces F\r\n barvec = p[bars[:,0]] - p[bars[:,1]] # List of bar vectors\r\n L = np.sqrt((barvec**2).sum(1)) # L = Bar lengths\r\n hbars = fh(p[bars].sum(1)/2)\r\n L0 = (hbars*Fscale\r\n *np.sqrt((L**2).sum()/(hbars**2).sum())) # L0 = Desired lengths\r\n\r\n # Density control - remove points that are too close\r\n if (count % densityctrlfreq) == 0 and (L0 > 2*L).any():\r\n ixdel = np.setdiff1d(bars[L0 > 2*L].reshape(-1), np.arange(nfix))\r\n p = p[np.setdiff1d(np.arange(N), ixdel)]\r\n N = p.shape[0]; pold = float('inf')\r\n continue\r\n\r\n F = L0-L; F[F<0] = 0 # Bar forces (scalars)\r\n Fvec = F[:,None]/L[:,None].dot([[1,1]])*barvec # Bar forces (x,y components)\r\n Ftot = dense(bars[:,[0,0,1,1]],\r\n np.repeat([[0,1,0,1]], len(F), axis=0),\r\n np.hstack((Fvec, -Fvec)),\r\n shape=(N, 2))\r\n Ftot[:nfix] = 0 # Force = 0 at fixed points\r\n p += deltat*Ftot # Update node positions\r\n\r\n # 7. Bring outside points back to the boundary\r\n d = fd(p); ix = d>0 # Find points outside (d>0)\r\n if ix.any():\r\n dgradx = (fd(p[ix]+[deps,0])-d[ix])/deps # Numerical\r\n dgrady = (fd(p[ix]+[0,deps])-d[ix])/deps # gradient\r\n dgrad2 = dgradx**2 + dgrady**2\r\n p[ix] -= (d[ix]*np.vstack((dgradx, dgrady))/dgrad2).T # Project\r\n\r\n # 8. Termination criterion: All interior nodes move less than dptol (scaled)\r\n if (np.sqrt((deltat*Ftot[d<-geps]**2).sum(1))/h0).max() < dptol:\r\n break\r\n\r\n # Clean up and plot final mesh\r\n p, t = fixmesh(p, t)\r\n\r\n return p, t\r\n\r\n
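# Quick usage sketch, mirroring the docstring's unit-circle example:\r\n# fd = lambda p: np.sqrt((p**2).sum(1)) - 1.0\r\n# p, t = distmesh2d(fd, lambda p: np.ones(p.shape[0]), 0.2, (-1,-1,1,1))\r\n\r\n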
# 8. Termination criterion: All interior nodes move less than dptol (scaled)\r\n if (np.sqrt((deltat*Ftot[d<-geps]**2).sum(1))/h0).max() < dptol:\r\n break\r\n\r\n # Clean up and plot final mesh\r\n p, t = fixmesh(p, t)\r\n\r\n return p, t\r\n\r\ndef uniform_mesh_on_unit_circle(h0):\r\n \"\"\"Uniform Mesh on Unit Circle\"\"\"\r\n fd = lambda p: np.sqrt((p**2).sum(1))-1.0\r\n def huniform(p):\r\n \"\"\"Implements the trivial uniform mesh size function h=1.\"\"\"\r\n return np.ones(p.shape[0])\r\n #return distmesh2d(fd, huniform, h0, (-1,-1,1,1),pfix=[[0,0]])\r\n return distmesh2d(fd, huniform, h0, (-1,-1,1,1))\r\n\r\ndef uniform_mesh_on_circle(R,h0):\r\n \"\"\"Uniform Mesh on Circle of radius R\"\"\"\r\n fd = lambda p: np.sqrt((p**2).sum(1))-R\r\n def huniform(p):\r\n \"\"\"Implements the trivial uniform mesh size function h=1.\"\"\"\r\n return np.ones(p.shape[0])\r\n return distmesh2d(fd, huniform, h0, (-R,-R,R,R))\r\n\r\n\r\nimport math\r\ndef convert_K_to_RGB(colour_temperature):\r\n \"\"\"\r\n Converts from K to RGB, algorithm courtesy of \r\n http://www.tannerhelland.com/4435/convert-temperature-rgb-algorithm-code/\r\n \"\"\"\r\n #range check\r\n if colour_temperature < 1000: \r\n colour_temperature = 1000\r\n elif colour_temperature > 40000:\r\n colour_temperature = 40000\r\n \r\n tmp_internal = colour_temperature / 100.0\r\n \r\n # red \r\n if tmp_internal <= 66:\r\n red = 255\r\n else:\r\n tmp_red = 329.698727446 * math.pow(tmp_internal - 60, -0.1332047592)\r\n if tmp_red < 0:\r\n red = 0\r\n elif tmp_red > 255:\r\n red = 255\r\n else:\r\n red = tmp_red\r\n \r\n # green\r\n if tmp_internal <= 66:\r\n tmp_green = 99.4708025861 * math.log(tmp_internal) - 161.1195681661\r\n if tmp_green < 0:\r\n green = 0\r\n elif tmp_green > 255:\r\n green = 255\r\n else:\r\n green = tmp_green\r\n else:\r\n tmp_green = 288.1221695283 * math.pow(tmp_internal - 60, -0.0755148492)\r\n if tmp_green < 0:\r\n green = 0\r\n elif tmp_green > 255:\r\n green = 255\r\n else:\r\n green = tmp_green\r\n\r\n # blue\r\n if tmp_internal >= 66:\r\n blue = 255\r\n elif tmp_internal <= 19:\r\n blue = 0\r\n else:\r\n tmp_blue = 138.5177312231 * math.log(tmp_internal - 10) - 305.0447927307\r\n if tmp_blue < 0:\r\n blue = 0\r\n elif tmp_blue > 255:\r\n blue = 255\r\n else:\r\n blue = tmp_blue\r\n\r\n return red, green, blue\r\n\r\n#-----------------------------------------------------------------------------\r\n# Functions\r\n#-----------------------------------------------------------------------------\r\n\r\ndef distmeshnd(fd, fh, h0, bbox, pfix=None, fig='gcf'):\r\n \"\"\"\r\n distmeshnd: N-D Mesh Generator using Distance Functions.\r\n\r\n Usage\r\n -----\r\n >>> p, t = distmeshnd(fd, fh, h0, bbox, pfix)\r\n\r\n Parameters\r\n ----------\r\n fd: Distance function d(x,y)\r\n fh: Scaled edge length function h(x,y)\r\n h0: Initial edge length\r\n bbox: Bounding box, (xmin, ymin, zmin, ..., xmax, ymax, zmax, ...)\r\n pfix: Fixed node positions, shape (nfix, dim)\r\n fig: Figure to use for plotting, or None to disable plotting.\r\n\r\n Returns\r\n -------\r\n p: Node positions (np, dim)\r\n t: Triangle indices (nt, dim+1)\r\n\r\n Example: (Unit ball)\r\n >>> dim = 3\r\n >>> fd = lambda p: sqrt((p**2).sum(1))-1.0\r\n >>> bbox = np.vstack((-np.ones(dim), np.ones(dim)))\r\n >>> p, t = distmeshnd(fd, huniform, 2, bbox)\r\n \"\"\"\r\n\r\n bbox = np.array(bbox).reshape(2, -1)\r\n dim = bbox.shape[1]\r\n\r\n ptol=.001; ttol=.1; L0mult=1+.4/2**(dim-1); deltat=.1; geps=1e-1*h0;\r\n deps=np.sqrt(np.finfo(np.double).eps)*h0;\r\n\r\n if pfix is not None:\r\n pfix = np.array(pfix, dtype='d')\r\n nfix = len(pfix)\r\n else:\r\n pfix = np.empty((0, dim))\r\n nfix = 0\r\n\r\n # 0. Prepare a figure\r\n\r\n # 1.
Create initial distribution in bounding box\r\n p = np.mgrid[tuple(slice(min, max+h0, h0) for min, max in bbox.T)]\r\n p = p.reshape(dim, -1).T\r\n\r\n # 2. Remove points outside the region, apply the rejection method\r\n p = p[fd(p)<geps] # Keep only d<0 points\r\n r0 = fh(p) # Probability to keep point\r\n p = p[np.random.rand(p.shape[0]) < r0.min()**dim/r0**dim] # Rejection method\r\n p = np.vstack((pfix, p)) # Prepend fix points\r\n N = p.shape[0] # Number of points N\r\n\r\n count = 0\r\n pold = float('inf') # For first iteration\r\n\r\n while True:\r\n count += 1\r\n # 3. Retriangulation by the Delaunay algorithm\r\n dist = lambda p1, p2: np.sqrt(((p1-p2)**2).sum(1))\r\n if (dist(p, pold)/h0).max() > ttol: # Any large movement?\r\n pold = p.copy() # Save current positions\r\n t = spspatial.Delaunay(p).vertices # List of triangles\r\n pmid = p[t].sum(1)/(dim+1) # Compute centroids\r\n t = t[fd(pmid) < -geps] # Keep interior triangles\r\n # 4. Describe each bar by a unique pair of nodes\r\n bars = np.vstack((t[:,pair] for pair in\r\n itertools.combinations(range(dim+1), 2)))\r\n bars.sort(axis=1)\r\n bars = unique_rows(bars) # Bars as node pairs\r\n # 5. Graphical output of the current mesh\r\n\r\n # 6. Move mesh points based on bar lengths L and forces F\r\n barvec = p[bars[:,0]] - p[bars[:,1]] # List of bar vectors\r\n L = np.sqrt((barvec**2).sum(1)) # L = Bar lengths\r\n hbars = fh(p[bars].sum(1)/2)\r\n L0 = (hbars*L0mult*((L**dim).sum()/(hbars**dim).sum())**(1.0/dim))\r\n F = L0-L; F[F<0] = 0 # Bar forces (scalars)\r\n Fvec = F[:,None]/L[:,None].dot(np.ones((1,dim)))*barvec # Bar forces (x,y components)\r\n Ftot = dense(bars[:,[0]*dim+[1]*dim],\r\n np.repeat([list(range(dim))*2], len(F), axis=0),\r\n np.hstack((Fvec, -Fvec)),\r\n shape=(N, dim))\r\n Ftot[:nfix] = 0 # Force = 0 at fixed points\r\n p += deltat*Ftot # Update node positions\r\n\r\n # 7. Bring outside points back to the boundary\r\n d = fd(p); ix = d>0 # Find points outside (d>0)\r\n if ix.any():\r\n def deps_vec(i): a = [0]*dim; a[i] = deps; return a\r\n dgrads = [(fd(p[ix]+deps_vec(i))-d[ix])/deps for i in range(dim)]\r\n dgrad2 = sum(dgrad**2 for dgrad in dgrads)\r\n p[ix] -= (d[ix]*np.vstack(dgrads)/dgrad2).T # Project\r\n\r\n # 8.
Termination criterion: All interior nodes move less than dptol (scaled)\r\n maxdp = deltat*np.sqrt((Ftot[d<-geps]**2).sum(1)).max()\r\n if maxdp < ptol*h0:\r\n break\r\n\r\n return p, t\r\n\r\ndef uniform_mesh_on_unit_ball(h0):\r\n \"\"\"Uniform Mesh on Unit Ball\"\"\"\r\n dim = 3\r\n fd = lambda p: np.sqrt((p**2).sum(1))-1.0\r\n bbox = np.vstack((-np.ones(dim), np.ones(dim)))\r\n def huniform(p):\r\n \"\"\"Implements the trivial uniform mesh size function h=1.\"\"\"\r\n return np.ones(p.shape[0])\r\n return distmeshnd(fd, huniform, h0, bbox)\r\n\r\n \r\n","sub_path":"femmd/src/pylibs/mdmesh.py","file_name":"mdmesh.py","file_ext":"py","file_size_in_byte":18773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"86163806","text":"#!/usr/bin/python3\n\"\"\" Engine to handle objects with ORM SQLAlchemy \"\"\"\n\nimport models\nfrom models.base_model import BaseModel, Base\nfrom models.user import User\nfrom models.amenity import Amenity\nfrom models.city import City\nfrom models.place import Place\nfrom models.review import Review\nfrom models.state import State\nimport sqlalchemy\nfrom sqlalchemy import (create_engine)\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.orm import scoped_session\nfrom os import getenv\nclasses = [State, City, User, Review, Place]\n\n\nclass DBStorage:\n \"\"\" SQLAlchemy database storage class \"\"\"\n\n __engine = None\n __session = None\n\n classes = {\n 'User': User, 'Place': Place, 'State': State,\n 'City': City, # 'Amenity': Amenity,\n 'Review': Review\n }\n\n def __init__(self):\n \"\"\" DBStorage: Instance initialization \"\"\"\n\n user = getenv('HBNB_MYSQL_USER')\n password = getenv('HBNB_MYSQL_PWD')\n host = getenv('HBNB_MYSQL_HOST')\n database = getenv('HBNB_MYSQL_DB')\n\n self.__engine = create_engine('mysql+mysqldb://{}:{}@{}/{}'\n .format(user, password, host, database),\n pool_pre_ping=True)\n\n if getenv('HBNB_ENV') == 'test':\n Base.metadata.drop_all(self.__engine)\n\n def all(self, cls=None):\n \"\"\"query on the current database session depending on the class name\"\"\"\n\n objs_dict = {}\n if cls:\n my_query = self.__session.query(cls).all()\n for obj in my_query:\n objs_dict[type(obj).__name__ + \".\" + obj.id] = obj\n else:\n for key, value in self.classes.items():\n my_query = self.__session.query(value).all()\n for obj in my_query:\n objs_dict[key + \".\" + obj.id] = obj\n\n return objs_dict\n\n def new(self, obj):\n \"\"\" Adds the object to the current SQL session \"\"\"\n self.__session.add(obj)\n\n def save(self):\n \"\"\" Commit changes coming from the current SQL session \"\"\"\n self.__session.commit()\n\n def delete(self, obj=None):\n \"\"\" Deletes the object from the current SQL session \"\"\"\n if obj is not None:\n self.__session.delete(obj)\n\n def reload(self):\n \"\"\" Creates the current database session and all its tables \"\"\"\n Base.metadata.create_all(self.__engine)\n session_factory = sessionmaker(\n bind=self.__engine, expire_on_commit=False)\n Session = scoped_session(session_factory)\n self.__session = Session # keep the registry so close() can call remove()\n\n def close(self):\n '''Closes the working SQLAlchemy session'''\n
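The `reload`/`close` pair in `DBStorage` leans on SQLAlchemy's scoped-session machinery. A self-contained sketch of that pattern against an in-memory SQLite database (SQLAlchemy 1.4+ import path; the `Thing` model and connection URL are illustrative stand-ins, not part of the project). Note that `.remove()` belongs to the scoped-session registry, which is why `reload` keeps the registry rather than a single session:

```python
from sqlalchemy import create_engine, Column, Integer, String
from sqlalchemy.orm import sessionmaker, scoped_session, declarative_base

Base = declarative_base()

class Thing(Base):
    """Illustrative mapped class standing in for State/City/etc."""
    __tablename__ = 'things'
    id = Column(Integer, primary_key=True)
    name = Column(String(64))

engine = create_engine('sqlite://', pool_pre_ping=True)   # in-memory database
Base.metadata.create_all(engine)                          # what reload() does

# expire_on_commit=False keeps attribute values readable after commit;
# scoped_session hands out one session per thread.
Session = scoped_session(sessionmaker(bind=engine, expire_on_commit=False))
session = Session()
session.add(Thing(name='demo'))
session.commit()

# The "<ClassName>.<id>" keying convention used by DBStorage.all():
print({'{}.{}'.format(type(o).__name__, o.id): o for o in session.query(Thing)})
Session.remove()   # dispose of the current thread's session via the registry
```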
self.__session.remove()\n","sub_path":"models/engine/db_storage.py","file_name":"db_storage.py","file_ext":"py","file_size_in_byte":2712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"189498861","text":"# Makemono\n# For Python 3\n# By Zoe Blade\n\n# Converts stereo .wav files into mono .wav files\n\nimport struct # For converting the (two's complement?) binary data to integers\nimport sys # For command line arguments\nimport wave # For .wav input and output\n\n# Set sensible defaults\nchannel = 'both'\ninputFilenames = []\n\nacceptableChannels = ['both', 'left', 'right']\n\n# Override the defaults\nfor argument in sys.argv:\n\t# Override the filename\n\tif (argument[-4:].lower() == '.wav'):\n\t\tinputFilenames.append(argument)\n\t\tcontinue\n\n\t# Override the channel\n\tif (argument[:10] == '--channel='):\n\t\tif (argument[10:] in acceptableChannels):\n\t\t\tchannel = argument[10:]\n\t\t\tcontinue\n\t\telse:\n\t\t\tprint(argument[10:], \"ain't any channel I ever heard of\")\n\t\t\texit()\n\nif (len(inputFilenames) == 0):\n\tprint(\"\"\"\\\nUsage:\npython3 makemono.py [option...] input.wav\n\nOptions: (may appear before or after arguments)\n\t--channel=foo\n\t\tset which channel to extract (default is both, other options are left and right)\n\t\"\"\")\n\texit()\n\n# Cycle through files\nfor inputFilename in inputFilenames:\n\toutputFilename = inputFilename[:-4] + '-mono' + '.wav'\n\n\ttry:\n\t\tinputFile = wave.open(inputFilename, 'r')\n\texcept:\n\t\tprint(inputFilename, \"doesn't look like a valid .wav file. Skipping.\")\n\t\tcontinue\n\n\tif (inputFile.getnchannels() != 2):\n\t\tprint(inputFilename, \"isn't stereo. Skipping.\")\n\t\tcontinue\n\n\ttry:\n\t\toutputFile = wave.open(outputFilename, 'w')\n\t\toutputFile.setnchannels(1)\n\t\toutputFile.setsampwidth(inputFile.getsampwidth())\n\t\toutputFile.setframerate(inputFile.getframerate())\n\texcept:\n\t\tprint(\"I couldn't write to\", outputFilename, \"Skipping.\")\n\t\tcontinue\n\n\tsampleWidth = inputFile.getsampwidth()\n\n\tif (channel == 'both'):\n\t\tprint('Extracting both channels of', inputFilename, 'into', outputFilename)\n\telif (channel == 'left'):\n\t\tprint('Extracting left channel of', inputFilename, 'into', outputFilename)\n\telif (channel == 'right'):\n\t\tprint('Extracting right channel of', inputFilename, 'into', outputFilename)\n\n\tfor iteration in range (0, inputFile.getnframes()):\n\t\tdatum = inputFile.readframes(1)\n\n\t\tif (channel == 'both'):\n\t\t\tleftChannelAsInteger = struct.unpack(' 0:\n self.response.out.write(json_update)\n else:\n self.response.out.write(html)\n\n def get_profile_target_user_data(self):\n email = self.request_student_email_legacy()\n # TODO: ACL\n return UserData.get_possibly_current_user(email)\n\n def redirect_if_not_ajax(self, student):\n if not self.is_ajax_request():\n # If it's not an ajax request, redirect to the appropriate /profile URL\n self.redirect(\"/profile?selected_graph_type=%s&student_email=%s&graph_query_params=%s\" %\n (self.GRAPH_TYPE, urllib.quote(student.email), urllib.quote(urllib.quote(self.request.query_string))))\n return True\n return False\n\n def redirect_for_more_data(self):\n return False\n\n def json_update(self, user_data):\n return \"\"\n\nclass ClassProfileGraph(ProfileGraph):\n def get_profile_target_user_data(self):\n coach = UserData.current()\n\n if coach:\n user_override = self.request_user_data(\"coach_email\")\n if user_override and user_override.are_students_visible_to(coach):\n # Only 
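The `get` handler above serves either a full profile page or just the freshly rendered graph fragment, depending on whether the request asked for an update. A framework-free sketch of that dual-mode dispatch; the function and renderer names are illustrative, not part of the original handler:

```python
import json

def handle_graph_request(params, render_page, render_fragment):
    """params: query-string dict; render_* are zero-argument callables."""
    if params.get('update', '0') == '1':
        fragment = render_fragment()
        # Fall back to the full page when there is nothing incremental to send
        return fragment if fragment else render_page()
    return render_page()

print(handle_graph_request({'update': '1'},
                           lambda: '<html>full page</html>',
                           lambda: json.dumps({'accuracy': 0.97})))
```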
allow looking at a student list other than your own\n # if you are a dev, admin, or coworker.\n coach = user_override\n\n return coach\n\n def redirect_if_not_ajax(self, coach):\n if not self.is_ajax_request():\n # If it's not an ajax request, redirect to the appropriate /profile URL\n self.redirect(\"/class_profile?selected_graph_type=%s&coach_email=%s&graph_query_params=%s\" %\n (self.GRAPH_TYPE, urllib.quote(coach.email), urllib.quote(urllib.quote(self.request.query_string))))\n return True\n return False\n\n def get_student_list(self, coach):\n student_lists = StudentList.get_for_coach(coach.key())\n _, actual_list = get_last_student_list(self, student_lists, coach.key()==UserData.current().key())\n return actual_list\n\nclass ProfileDateToolsGraph(ProfileGraph):\n\n DATE_FORMAT = \"%Y-%m-%d\"\n\n @staticmethod\n def inclusive_start_date(dt):\n return datetime.datetime(dt.year, dt.month, dt.day, 0, 0, 0) # Inclusive of start date\n\n @staticmethod\n def inclusive_end_date(dt):\n return datetime.datetime(dt.year, dt.month, dt.day, 23, 59, 59) # Inclusive of end date\n\n def request_date_ctz(self, key):\n # Always work w/ client timezone dates on the client and UTC dates on the server\n dt = self.request_date(key, self.DATE_FORMAT, default=datetime.datetime.min)\n if dt == datetime.datetime.min:\n s_dt = self.request_string(key, default=\"\")\n if s_dt == \"today\":\n dt = self.inclusive_start_date(self.utc_to_ctz(datetime.datetime.now()))\n elif s_dt == \"yesterday\":\n dt = self.inclusive_start_date(self.utc_to_ctz(datetime.datetime.now()) - datetime.timedelta(days=1))\n elif s_dt == \"lastweek\":\n dt = self.inclusive_start_date(self.utc_to_ctz(datetime.datetime.now()) - datetime.timedelta(days=6))\n elif s_dt == \"lastmonth\":\n dt = self.inclusive_start_date(self.utc_to_ctz(datetime.datetime.now()) - datetime.timedelta(days=29))\n return dt\n\n def tz_offset(self):\n return self.request_int(\"tz_offset\", default=0)\n\n def ctz_to_utc(self, dt_ctz):\n return dt_ctz - datetime.timedelta(minutes=self.tz_offset())\n\n def utc_to_ctz(self, dt_utc):\n return dt_utc + datetime.timedelta(minutes=self.tz_offset())\n\nclass ClassProfileDateGraph(ClassProfileGraph, ProfileDateToolsGraph):\n DATE_FORMAT = \"%d/%m/%Y\"\n\n def get_date(self):\n dt_ctz = self.request_date_ctz(\"dt\")\n\n if dt_ctz == datetime.datetime.min:\n # If no date, assume looking at today\n dt_ctz = self.utc_to_ctz(datetime.datetime.now())\n\n return self.ctz_to_utc(self.inclusive_start_date(dt_ctz))\n\nclass ProfileDateRangeGraph(ProfileDateToolsGraph):\n\n def get_start_date(self):\n dt_ctz = self.request_date_ctz(\"dt_start\")\n\n if dt_ctz == datetime.datetime.min:\n # If no start date, assume looking at last 7 days\n dt_ctz = self.utc_to_ctz(datetime.datetime.now() - datetime.timedelta(days=6))\n\n return self.ctz_to_utc(self.inclusive_start_date(dt_ctz))\n\n def get_end_date(self):\n dt_ctz = self.request_date_ctz(\"dt_end\")\n dt_start_ctz_test = self.request_date_ctz(\"dt_start\")\n dt_start_ctz = self.utc_to_ctz(self.get_start_date())\n\n if (dt_ctz == datetime.datetime.min and dt_start_ctz_test == datetime.datetime.min):\n # If no end date or start date specified, assume looking at 7 days after start date\n dt_ctz = dt_start_ctz + datetime.timedelta(days=6)\n elif dt_ctz == datetime.datetime.min:\n # If start date specified but no end date, assume one day\n dt_ctz = dt_start_ctz\n\n if (dt_ctz - dt_start_ctz).days > consts.MAX_GRAPH_DAY_RANGE or dt_start_ctz > dt_ctz:\n # Maximum range of 30 days for now\n 
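The date-window policy in `get_start_date`/`get_end_date`, including the clamp applied just below, is easiest to verify with concrete values: a missing range defaults to the last seven days, a start date without an end date means a single day, and anything longer than `consts.MAX_GRAPH_DAY_RANGE` is truncated. A simplified sketch of that resolution logic, with the constant and names as illustrative stand-ins:

```python
import datetime

MAX_GRAPH_DAY_RANGE = 30   # mirrors consts.MAX_GRAPH_DAY_RANGE

def resolve_range(dt_start=None, dt_end=None, today=None):
    today = today or datetime.datetime.now()
    if dt_start is None:
        dt_start = today - datetime.timedelta(days=6)      # last 7 days
        if dt_end is None:
            dt_end = dt_start + datetime.timedelta(days=6)
    elif dt_end is None:
        dt_end = dt_start                                  # single day
    if (dt_end - dt_start).days > MAX_GRAPH_DAY_RANGE or dt_start > dt_end:
        dt_end = dt_start + datetime.timedelta(days=MAX_GRAPH_DAY_RANGE)
    return dt_start, dt_end

start = datetime.datetime(2011, 5, 1)
print(resolve_range(start))                                       # one day
print(resolve_range(start, start + datetime.timedelta(days=90)))  # clamped to 30
```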
dt_ctz = dt_start_ctz + datetime.timedelta(days=consts.MAX_GRAPH_DAY_RANGE)\n\n return self.ctz_to_utc(self.inclusive_end_date(dt_ctz))\n\n def redirect_for_more_data(self):\n dt_start_ctz_test = self.request_date_ctz(\"dt_start\")\n dt_end_ctz_test = self.request_date_ctz(\"dt_end\")\n\n # If no dates were specified and activity was empty, try max day range instead of default 7.\n if dt_start_ctz_test == datetime.datetime.min and dt_end_ctz_test == datetime.datetime.min:\n self.redirect(self.request_url_with_additional_query_params(\"dt_start=lastmonth&dt_end=today&is_ajax_override=1\"))\n return True\n\n return False\n\nclass ActivityGraph(ProfileDateRangeGraph):\n GRAPH_TYPE = \"activity\"\n def graph_html_and_context(self, student):\n return templatetags.profile_activity_graph(student, self.get_start_date(), self.get_end_date(), self.tz_offset())\n\nclass FocusGraph(ProfileDateRangeGraph):\n GRAPH_TYPE = \"focus\"\n def graph_html_and_context(self, student):\n return templatetags.profile_focus_graph(student, self.get_start_date(), self.get_end_date())\n\nclass ExercisesOverTimeGraph(ProfileGraph):\n GRAPH_TYPE = \"exercisesovertime\"\n def graph_html_and_context(self, student):\n return templatetags.profile_exercises_over_time_graph(student)\n\nclass ExerciseProblemsGraph(ProfileGraph):\n GRAPH_TYPE = \"exerciseproblems\"\n def graph_html_and_context(self, student):\n return templatetags.profile_exercise_problems_graph(student, self.request_string(\"exercise_name\"))\n\nclass ClassExercisesOverTimeGraph(ClassProfileGraph):\n GRAPH_TYPE = \"classexercisesovertime\"\n def graph_html_and_context(self, coach):\n student_list = self.get_student_list(coach)\n return templatetags.class_profile_exercises_over_time_graph(coach, student_list)\n\nclass ClassProgressReportGraph(ClassProfileGraph):\n GRAPH_TYPE = \"progressreport\"\n\nclass ClassTimeGraph(ClassProfileDateGraph):\n GRAPH_TYPE = \"classtime\"\n def graph_html_and_context(self, coach):\n student_list = self.get_student_list(coach)\n return templatetags.class_profile_time_graph(coach, self.get_date(), self.tz_offset(), student_list)\n\nclass ClassEnergyPointsPerMinuteGraph(ClassProfileGraph):\n GRAPH_TYPE = \"classenergypointsperminute\"\n def graph_html_and_context(self, coach):\n student_list = self.get_student_list(coach)\n return templatetags.class_profile_energy_points_per_minute_graph(coach, student_list)\n\n def json_update(self, coach):\n student_list = self.get_student_list(coach)\n return templatetags.class_profile_energy_points_per_minute_update(coach, student_list)\n","sub_path":"app/profiles/util_profile.py","file_name":"util_profile.py","file_ext":"py","file_size_in_byte":22247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"185829661","text":"import cast_upgrade_1_5_22 # @UnusedImport\nfrom cast.application import ApplicationLevelExtension\nfrom logging import debug, info\nfrom traceback import format_exc\nfrom logger import warning\nfrom cast.application import create_link\n\nclass SynonymLinksProperties(ApplicationLevelExtension):\n \n def end_application(self, application):\n \n info(\"Start pushing links from synonyms to aliased objects\")\n list_of_callee_types = ['SQLScriptTable', 'SQLScriptView', 'SQLScriptMethod','SQLScriptType', 'SQLScriptProcedure', 'SQLScriptFunction']\n list_of_synonyms_temporary_types = ['SQLScriptTypeSynonym', 'SQLScriptProcedureSynonym', \n 'SQLScriptFunctionSynonym', 'SQLScriptViewSynonym', 'SQLScriptTableSynonym']\n \n 
temporary_links = application.links().has_callee(application.objects().has_type(list_of_callee_types))\\\n .has_caller(application.objects().has_type(list_of_synonyms_temporary_types)).count()\n if temporary_links == 0:\n debug(\" There is no link between aliased objects and temporary objects\")\n else:\n client = None\n bookmark = None\n aliased = None\n alias = None\n list_links_type = None\n \n list_of_aliases = []\n list_of_aliased_links = []\n list_of_aliased_links_t = []\n list_of_aliased_links_o = []\n list_of_o_callee_types = ['SQLScriptView', 'SQLScriptType', 'SQLScriptProcedure', 'SQLScriptFunction']\n list_of_link_types = ('useSelect', 'useDelete', 'useUpdate', 'useInsert', 'call', 'useSelectLink', 'useDeleteLink', 'useUpdateLink', \n 'useInsertLink', 'callLink')\n list_of_excluded_types = list_of_synonyms_temporary_types + \\\n\t\t\t\t\t\t\t\t\t['SQLScriptSynonym', 'SQLScriptUniqueConstraint', 'SQLScriptForeignKey',\n\t\t\t\t\t\t\t\t\t'SQLScriptIndex', 'SQLScriptTableColumn', 'SQLScriptExternalProgram',\n 'SQLScriptPackage', 'SQLScriptPackageSynonym']\n\n list_of_excluded_types_t = ['SQLScriptTable'] + list_of_excluded_types\n \n # retrieve the list of links for objects that could have aliases\n for link in application.links().load_positions()\\\n .has_callee(application.objects().has_type('SQLScriptTable'))\\\n .has_caller(application.objects().not_has_type(list_of_excluded_types_t)):\n\n list_links_type, aliased, client, bookmark = link.get_type_names(), link.get_callee(), link.get_caller(), link.get_positions()\n if not bookmark:\n continue\n\n list_of_aliased_links_temp = [(link_type, client, aliased, bookmark[0]) for link_type in list_links_type if link_type in list_of_link_types]\n list_of_aliased_links_t += list_of_aliased_links_temp\n \n for link in application.links().load_positions()\\\n .has_callee(application.objects().has_type(list_of_o_callee_types))\\\n .has_caller(application.objects().not_has_type(list_of_excluded_types)):\n\n list_links_type, aliased, client, bookmark = link.get_type_names(), link.get_callee(), link.get_caller(), link.get_positions()\n\n if not bookmark:\n continue\n list_of_aliased_links_temp = [(link_type, client, aliased, bookmark[0]) for link_type in list_links_type if link_type in list_of_link_types]\n list_of_aliased_links_o += list_of_aliased_links_temp\n \n list_of_aliased_links = list_of_aliased_links_t + list_of_aliased_links_o\n\n # retrieve alias and objects aliased list\n list_of_aliases = [(link.get_callee(), link.get_caller()) for link in application.links()\\\n .has_callee(application.objects().has_type(list_of_callee_types))\\\n .has_caller(application.objects().has_type(list_of_synonyms_temporary_types))]\n\n def check_final_object (alias):\n for aliased in list_of_aliases:\n if str(aliased[1]) == str(alias):\n return aliased[0]\n return None\n \n def check_exists_link (link_type, client, aliased, bookmark):\n existing_link = False\n for list_of_links in list_of_aliased_links:\n if str(link_type) == str(list_of_links[0]) and str(client) == str(list_of_links[1]) \\\n and str(aliased) == str(list_of_links[2]) and str(bookmark) == str(list_of_links[3]):\n existing_link = True\n break\n \n return existing_link\n \n # retrieve the list between aliases and client objects\n # from the list of client objects explude temporary objects and table subobjects, like FK, PK, etc\n for link in 
application.links().load_positions()\\\n\t\t\t\t\t\t.has_callee(application.objects().has_type(list_of_synonyms_temporary_types))\\\n\t\t\t\t\t\t.has_caller(application.objects().not_has_type(list_of_excluded_types)):\n list_links_type = link.get_type_names()\n client = link.get_caller()\n alias = link.get_callee()\n bookmark = link.get_positions()\n \n if not bookmark:\n continue\n \n # retrieve the aliased object\n aliased = check_final_object(alias)\n if not aliased:\n continue\n \n for link_type in list_links_type:\n if link_type in list_of_link_types: \n links_exists = check_exists_link (link_type, client.fullname, aliased.fullname, bookmark[0]) \n if not links_exists:\n saved_link_type = link_type if link_type[-4:].lower() == 'link' else '%sLink' % link_type \n try:\n# print(' Add ', saved_link_type, ' between ', client.fullname,' and ', aliased.fullname, ', bookmark=(', bookmark[0], ')')\n debug(\" Add %s between %s and %s, bookmark=(%s)\" % (saved_link_type, client.fullname, aliased.fullname, bookmark[0]))\n\n create_link(saved_link_type, client, aliased, bookmark[0])\n except:\n warning('SQL-013','Links could not be moved from synonyms to aliased objects because of %s ' % format_exc())\n \n info(\"End pushing links from synonyms to aliased objects\")\n\n info(\"Start removing temporary objects\")\n \n req = \"\"\"insert into CI_NO_OBJECTS (OBJECT_ID, ERROR_ID)\nselect IdKey, 0 from Keys \n where \n ObjTyp in ( 1101042, 1101043, 1101044, 1101045, 1101046, 1101050) -- specific synonyms\n\"\"\"\n try:\n application.update_cast_knowledge_base('SQL-007', req)\n except:\n warning('SQL-007','Temporary objects cannot be removed, because of %s ' % format_exc())\n \n info(\"End removing temporary objects\")","sub_path":"analyze/extensions/com.castsoftware.sqlanalyzer.2.6.9-funcrel/TransfertSynonymsToObjectsLinks.py","file_name":"TransfertSynonymsToObjectsLinks.py","file_ext":"py","file_size_in_byte":7369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"483937258","text":"import numpy\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import autograd\nimport math\nfrom typing import Any, Dict, Tuple, List, Union, Set\nimport warnings\nfrom collections import OrderedDict\nfrom torch.nn.modules.linear import Linear\nfrom torchmeta.modules import MetaLinear, MetaSequential, MetaModule\nfrom tqdm import tqdm\n\nclass LInEx(MetaModule):\n def __init__(self,input_dim:int,hidden_dim:int,max_slots:int,init_slots:int,device:Union[torch.device, None]=None,**kwargs)->None:\n super().__init__()\n if input_dim != hidden_dim:\n self.input_map = MetaSequential(OrderedDict({\n \"linear_0\": MetaLinear(input_dim, hidden_dim),\n \"relu_0\": nn.ReLU(),\n \"dropout_0\": nn.Dropout(0.2),\n \"linear_1\": MetaLinear(hidden_dim, hidden_dim),\n \"relu_1\": nn.ReLU()\n }))\n else:\n self.input_map = lambda x:x\n self.classes = MetaLinear(hidden_dim, max_slots, bias=False)\n _mask = torch.zeros(1, max_slots, dtype=torch.float, device=device)\n _mask[:, init_slots:] = float(\"-inf\")\n self.register_buffer(name=\"_mask\", tensor=_mask)\n self.crit = nn.CrossEntropyLoss()\n self.device = device\n self.to(device=device)\n self.nslots = init_slots\n self.max_slots = max_slots\n self.maml = True\n self.outputs = {}\n self.history = None\n self.exemplar_features = None\n self.exemplar_labels = None\n self.dev_exemplar_features = None\n self.dev_exemplar_labels = None\n\n @property\n def mask(self,):\n self._mask[:, :self.nslots] = 0\n 
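`check_exists_link` above answers "was an identical link already recorded?" by scanning a list of `(link_type, caller, callee, bookmark)` tuples for every candidate. The same membership test reads naturally as a set lookup; a small sketch with toy identifiers standing in for the CAST objects and bookmarks:

```python
# Each existing link is a (link_type, caller, callee, bookmark) tuple.
existing_links = {
    ('callLink', 'PROC_A', 'TABLE_T', 'file.sql:12'),
    ('useSelectLink', 'VIEW_V', 'TABLE_T', 'file.sql:40'),
}

def link_exists(link_type, caller, callee, bookmark):
    return (link_type, caller, callee, bookmark) in existing_links

# A link that reached a table through a synonym is only re-created on the
# aliased object when an identical direct link is not already recorded:
candidate = ('callLink', 'PROC_A', 'TABLE_T', 'file.sql:12')
if not link_exists(*candidate):
    print('create_link', *candidate)
else:
    print('skip duplicate', *candidate)
```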
self._mask[:, self.nslots:] = float(\"-inf\")\n return self._mask\n\n def idx_mask(self, idx:Union[torch.LongTensor, int, List[int], None]=None, max_idx:Union[torch.LongTensor, int, None]=None):\n assert (idx is not None) or (max_idx is not None)\n assert (idx is None) or (max_idx is None)\n mask = torch.zeros_like(self._mask) + float(\"-inf\")\n if idx is not None:\n mask[:, idx] = 0\n if max_idx is not None:\n if isinstance(max_idx, torch.LongTensor):\n max_idx = max_idx.item()\n mask[:, :max_idx] = 0\n return mask\n\n @property\n def features(self):\n return self.classes.weight[:self.nslots]\n\n def forward(self, batch, nslots:int=-1, exemplar:bool=False, exemplar_distill:bool=False, feature_distill:bool=False, mul_distill=False, distill:bool=False, return_loss:bool=True, return_feature:bool=False, tau:float=1.0, log_outputs:bool=True, params=None):\n if isinstance(batch, (tuple, list)) and len(batch) == 2:\n features, labels = batch\n else:\n features, labels = batch.features, batch.labels\n inputs = self.input_map(features, params=self.get_subdict(params, \"input_map\"))\n scores = self.classes(inputs, params=self.get_subdict(params, \"classes\"))\n if torch.any(torch.isnan(scores)):\n print(scores[0])\n input('a')\n if nslots == -1:\n scores += self.mask\n if torch.any(torch.isnan(scores)):\n print(scores[0])\n input()\n nslots = self.nslots\n else:\n scores += self.idx_mask(max_idx=nslots)\n scores[:, 0] = 0\n if scores.size(0) != labels.size(0):\n assert scores.size(0) % labels.size(0) == 0\n labels = labels.repeat_interleave(scores.size(0) // labels.size(0), dim=0)\n else:\n labels = labels\n if log_outputs:\n pred = torch.argmax(scores, dim=1)\n acc = torch.mean((pred == labels).float())\n self.outputs[\"accuracy\"] = acc.item()\n self.outputs[\"prediction\"] = pred.detach().cpu()\n self.outputs[\"label\"] = labels.detach().cpu()\n self.outputs[\"input_features\"] = features.detach().cpu()\n self.outputs[\"encoded_features\"] = inputs.detach().cpu()\n if return_loss:\n labels.masked_fill_(labels >= nslots, 0)\n valid = labels < nslots\n nvalid = torch.sum(valid.float())\n if nvalid == 0:\n loss = 0\n else:\n loss = self.crit(scores[valid], labels[valid])\n if torch.isnan(loss):\n print(labels, nslots, scores[:, :nslots])\n input()\n if distill and self.history is not None:\n old_scores, old_inputs = self.forward(batch, nslots=self.history[\"nslots\"], return_loss=False, log_outputs=False, return_feature=True, params=self.history[\"params\"])\n old_scores = old_scores.detach()\n old_inputs = old_inputs.detach()\n new_scores = scores[:, :self.history[\"nslots\"]]\n if mul_distill:\n loss_distill = - torch.sum(torch.softmax(old_scores*tau, dim=1) * torch.log_softmax(new_scores*tau, dim=1), dim=1).mean()\n old_dist = torch.softmax(old_scores/tau, dim=1)\n old_valid = (old_dist[:, 0] < 0.9)\n old_num = torch.sum(old_valid.float())\n if old_num > 0:\n # print(old_dist[old_valid].topk(5, dim=1), batch.labels[old_valid])\n # input()\n loss_mul_distill = - torch.sum(old_dist[old_valid] * torch.log_softmax(new_scores[old_valid], dim=1), dim=1).sum()\n loss_distill = (loss_distill * old_dist.size(0) + loss_mul_distill) / (old_dist.size(0) + old_num)\n else:\n loss_distill = - torch.sum(torch.softmax(old_scores*tau, dim=1) * torch.log_softmax(new_scores*tau, dim=1), dim=1).mean()\n if feature_distill:\n loss_f_distill = (1 - (old_inputs / old_inputs.norm(dim=-1, keepdim=True) * inputs / inputs.norm(dim=-1, keepdim=True)).sum(dim=-1)).mean(dim=0)\n loss_distill += loss_f_distill\n\n d_weight 
= self.history[\"nslots\"]\n c_weight = (self.nslots - self.history[\"nslots\"])\n loss = ( d_weight * loss_distill+ c_weight* loss) / (d_weight+c_weight)\n if torch.isnan(loss):\n print(old_scores, new_scores)\n input()\n if exemplar and self.exemplar_features is not None:\n if self.exemplar_features.size(0) < 128:\n exemplar_inputs = self.input_map(self.exemplar_features.to(self.device), params=self.get_subdict(params, \"input_map\"))\n exemplar_scores = self.classes(exemplar_inputs, params=self.get_subdict(params, \"classes\"))\n else:\n exemplar_scores = []\n exemplar_inputs = []\n for _beg in range(0, self.exemplar_features.size(0), 128):\n _features = self.exemplar_features[_beg:_beg+128, :]\n _inputs = self.input_map(_features.to(self.device), params=self.get_subdict(params, \"input_map\"))\n exemplar_inputs.append(_inputs)\n exemplar_scores.append(self.classes(_inputs, params=self.get_subdict(params, \"classes\")))\n exemplar_inputs = torch.cat(exemplar_inputs, dim=0)\n exemplar_scores = torch.cat(exemplar_scores, dim=0)\n exemplar_scores[:, 0] = 0.\n loss_exemplar = self.crit(exemplar_scores+self.mask, self.exemplar_labels.to(self.device))\n if torch.isnan(loss_exemplar):\n print(self.exemplar_labels, nslots)\n input()\n if exemplar_distill:\n if self.exemplar_features.size(0) < 128:\n exemplar_old_inputs = self.input_map(self.exemplar_features.to(self.device), params=self.get_subdict(self.history[\"params\"], \"input_map\"))\n exemplar_old_scores = self.classes(exemplar_old_inputs, params=self.get_subdict(self.history[\"params\"], \"classes\"))\n else:\n exemplar_old_scores = []\n exemplar_old_inputs = []\n for _beg in range(0, self.exemplar_features.size(0), 128):\n _features = self.exemplar_features[_beg:_beg+128, :]\n _inputs = self.input_map(_features.to(self.device), params=self.get_subdict(self.history[\"params\"], \"input_map\"))\n exemplar_old_inputs.append(_inputs)\n exemplar_old_scores.append(self.classes(_inputs, params=self.get_subdict(self.history[\"params\"], \"classes\")))\n exemplar_old_inputs = torch.cat(exemplar_old_inputs, dim=0)\n exemplar_old_scores = torch.cat(exemplar_old_scores, dim=0)\n exemplar_old_scores[:, 0] = 0.\n exemplar_old_scores = exemplar_old_scores[:self.history[\"nslots\"]]\n loss_exemplar_distill = - torch.sum(torch.softmax(exemplar_old_scores[:self.history[\"nslots\"]]*tau, dim=1) * torch.log_softmax(exemplar_scores[:self.history[\"nslots\"]], dim=1), dim=1).mean()\n if feature_distill:\n loss_exemplar_feat_distill = (1 - (exemplar_old_inputs / exemplar_old_inputs.norm(dim=-1, keepdim=True) * exemplar_inputs / exemplar_inputs.norm(dim=-1, keepdim=True)).sum(dim=-1)).mean(dim=0)\n loss_exemplar_distill += loss_exemplar_feat_distill\n d_weight = self.history[\"nslots\"]\n c_weight = (self.nslots - self.history[\"nslots\"])\n loss_exemplar = (d_weight * loss_exemplar_distill+ c_weight* loss_exemplar) / (d_weight+c_weight)\n e_weight = self.exemplar_features.size(0)\n loss = (nvalid * loss + e_weight * loss_exemplar) / (nvalid + e_weight)\n if torch.isnan(loss):\n print(loss, loss_exemplar)\n return loss\n else:\n if return_feature:\n return scores[:, :nslots], inputs\n else:\n return scores[:, :nslots]\n\n def score(self, *args, **kwargs):\n return self.forward(*args, **kwargs)\n\n def clone_params(self,):\n return OrderedDict({k:v.clone().detach() for k,v in self.meta_named_parameters()})\n\n def set_history(self,):\n self.history = {\"params\": self.clone_params(), \"nslots\": self.nslots}\n\n def set_exemplar(self, dataloader, q:int=20, 
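The distillation term above is soft cross-entropy between the frozen snapshot's logits and the new model's logits over the old label slots, sharpened by a temperature `tau`, then averaged with the classification loss using the slot counts as weights. A minimal PyTorch sketch of just that term, with random logits standing in for real model outputs:

```python
import torch

def distill_loss(old_scores, new_scores, tau=1.0):
    """old_scores, new_scores: (batch, n_old_slots); old_scores are detached."""
    old_probs = torch.softmax(old_scores * tau, dim=1)   # teacher distribution
    new_logp = torch.log_softmax(new_scores * tau, dim=1)
    return -(old_probs * new_logp).sum(dim=1).mean()

old = torch.randn(4, 10)                      # logits from the saved snapshot
new = torch.randn(4, 10, requires_grad=True)  # logits from the current model
loss = distill_loss(old.detach(), new, tau=1.0)
loss.backward()
print(float(loss))
```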
params=None, label_sets:Union[List, Set, None]=None, collect_none:bool=False, use_input:bool=False, output_only:bool=False, output:Union[str, None]=None):\n self.eval()\n with torch.no_grad():\n ifeat = []; ofeat = []; label = []\n num_batches = len(dataloader)\n for batch in tqdm(dataloader, \"collecting exemplar\", ncols=128):\n batch = batch.to(self.device)\n loss = self.forward(batch, params=params)\n ifeat.append(self.outputs[\"input_features\"])\n if use_input:\n ofeat.append(self.outputs[\"input_features\"])\n else:\n ofeat.append(self.outputs[\"encoded_features\"])\n label.append(self.outputs[\"label\"])\n ifeat = torch.cat(ifeat, dim=0)\n ofeat = torch.cat(ofeat, dim=0)\n label = torch.cat(label, dim=0)\n nslots = max(self.nslots, torch.max(label).item()+1)\n exemplar = {}\n if label_sets is None:\n if collect_none:\n label_sets = range(nslots)\n else:\n label_sets = range(1, nslots)\n else:\n if collect_none:\n if 0 not in label_sets:\n label_sets = sorted([0] + list(label_sets))\n else:\n label_sets = sorted(list(label_sets))\n else:\n label_sets = sorted([t for t in label_sets if t != 0])\n for i in label_sets:\n idx = (label == i)\n if i == 0:\n # random sample for none type\n nidx = torch.nonzero(idx, as_tuple=True)[0].tolist()\n exemplar[i] = numpy.random.choice(nidx, q, replace=False).tolist()\n continue\n if torch.any(idx):\n exemplar[i] = []\n nidx = torch.nonzero(idx, as_tuple=True)[0].tolist()\n mfeat = torch.mean(ofeat[idx], dim=0, keepdims=True)\n if len(nidx) < q:\n exemplar[i].extend(nidx * (q // len(nidx)) + nidx[:(q % len(nidx))])\n else:\n for j in range(q):\n if j == 0:\n dfeat = torch.sum((ofeat[nidx] - mfeat)**2, dim=1)\n else:\n cfeat = ofeat[exemplar[i]].sum(dim=0, keepdims=True)\n cnum = len(exemplar[i])\n dfeat = torch.sum((mfeat * (cnum + 1) - ofeat[nidx] - cfeat)**2, )\n tfeat = torch.argmin(dfeat)\n exemplar[i].append(nidx[tfeat])\n nidx.pop(tfeat.item())\n exemplar = {i: ifeat[v] for i,v in exemplar.items()}\n exemplar_features = []\n exemplar_labels = []\n for label, features in exemplar.items():\n exemplar_features.append(features)\n exemplar_labels.extend([label]*features.size(0))\n exemplar_features = torch.cat(exemplar_features, dim=0).cpu()\n exemplar_labels = torch.LongTensor(exemplar_labels).cpu()\n if not output_only or output is not None:\n if output == \"train\" or output is None:\n if self.exemplar_features is None:\n self.exemplar_features = exemplar_features\n self.exemplar_labels = exemplar_labels\n else:\n self.exemplar_features = torch.cat((self.exemplar_features, exemplar_features), dim=0)\n self.exemplar_labels = torch.cat((self.exemplar_labels, exemplar_labels), dim=0)\n elif output == \"dev\":\n if self.dev_exemplar_features is None:\n self.dev_exemplar_features = exemplar_features\n self.dev_exemplar_labels = exemplar_labels\n else:\n self.dev_exemplar_features = torch.cat((self.dev_exemplar_features, exemplar_features), dim=0)\n self.dev_exemplar_labels = torch.cat((self.dev_exemplar_labels, exemplar_labels), dim=0)\n\n return {i: v.cpu() for i,v in exemplar.items()}\n\n def initialize(self, exemplar, ninstances:Dict[int, int], gamma:float=1.0, tau:float=1.0, alpha:float=0.5, params=None):\n self.eval()\n \n with torch.no_grad():\n weight_norm = torch.norm(self.classes.weight[1:self.nslots], dim=1).mean(dim=0)\n label_inits = []\n label_kt = {}\n for label, feats in exemplar.items():\n exemplar_inputs = self.input_map(feats.to(self.device), params=self.get_subdict(params, \"input_map\"))\n exemplar_scores = 
self.classes(exemplar_inputs, params=self.get_subdict(params, \"classes\"))\n exemplar_scores = exemplar_scores + self.mask\n exemplar_scores[:, 0] = 0\n exemplar_weights = torch.softmax(exemplar_scores * tau, dim=1)\n normalized_inputs = exemplar_inputs / torch.norm(exemplar_inputs, dim=1, keepdim=True) * weight_norm\n proto = (exemplar_weights[:, :1] * normalized_inputs).mean(dim=0)\n knowledge = torch.matmul(exemplar_weights[:, 1:self.nslots], self.classes.weight[1:self.nslots]).mean(dim=0)\n gate = alpha * math.exp(- ninstances[label] * gamma)\n # gate = 1 / (1 + ninstances[label] * gamma)\n rnd = torch.randn_like(proto) * weight_norm / math.sqrt(self.classes.weight.size(1))\n initvec = proto * gate + knowledge * gate + (1 - gate) * rnd\n label_inits.append((label, initvec.cpu()))\n label_kt[label] = exemplar_weights.mean(dim=0).cpu()\n label_inits.sort(key=lambda t:t[0])\n inits = []\n for i, (label, init) in enumerate(label_inits):\n assert label == self.nslots + i\n inits.append(init)\n inits = torch.stack(inits, dim=0)\n self.outputs[\"new2old\"] = label_kt\n return inits.detach()\n \n def initialize2(self, exemplar, ninstances:Dict[int, int], gamma:float=1.0, tau:float=1.0, alpha:float=0.5, delta:float=0.5, params=None):\n self.eval()\n def top_p(probs, p=0.9):\n _val, _idx = torch.sort(probs, descending=True, dim=1)\n top_mask = torch.zeros_like(probs).float() - float(\"inf\")\n for _type in range(probs.size(0)):\n accumulated = 0\n _n = 0\n while accumulated < p or _n <= 1:\n top_mask[_type, _idx[_type, _n]] = 0\n accumulated += _val[_type, _n]\n _n += 1\n return top_mask\n with torch.no_grad():\n weight_norm = torch.norm(self.classes.weight[1:self.nslots], dim=1).mean(dim=0)\n label_inits = []\n label_kt = {}\n for label, feats in exemplar.items():\n exemplar_inputs = self.input_map(feats.to(self.device), params=self.get_subdict(params, \"input_map\"))\n exemplar_scores = self.classes(exemplar_inputs, params=self.get_subdict(params, \"classes\"))\n exemplar_scores = exemplar_scores + self.mask\n exemplar_scores[:, 0] = 0\n top_mask = top_p(torch.softmax(exemplar_scores, dim=1))\n exemplar_scores = exemplar_scores + top_mask\n exemplar_scores[:, 0] = 0\n exemplar_weights = torch.softmax(exemplar_scores * tau, dim=1)\n normalized_inputs = exemplar_inputs / torch.norm(exemplar_inputs, dim=1, keepdim=True) * weight_norm\n proto = delta * (exemplar_weights[:, :1] * normalized_inputs).mean(dim=0)\n kweight = (1 - exemplar_weights[:, :1])\n knowledge = torch.matmul((1-delta*exemplar_weights[:, :1]) * (exemplar_weights[:, 1:self.nslots] + 1e-8) / torch.clamp(1 - exemplar_weights[:, :1], 1e-8), self.classes.weight[1:self.nslots]).mean(dim=0)\n gate = alpha * math.exp(- ninstances[label] * gamma)\n rnd = torch.randn_like(proto) * weight_norm / math.sqrt(self.classes.weight.size(1))\n initvec = proto * gate + knowledge * gate + (1 - gate) * rnd\n if torch.any(torch.isnan(initvec)):\n print(proto, knowledge, rnd, gate, exemplar_weights[:, :1], exemplar_scores[-1, :self.nslots])\n input()\n label_inits.append((label, initvec.cpu()))\n label_kt[label] = exemplar_weights.mean(dim=0).cpu()\n label_inits.sort(key=lambda t:t[0])\n inits = []\n for i, (label, init) in enumerate(label_inits):\n assert label == self.nslots + i\n inits.append(init)\n inits = torch.stack(inits, dim=0)\n self.outputs[\"new2old\"] = label_kt\n return inits.detach()\n \n def set(self, features:torch.tensor, ids:Union[int, torch.Tensor, List, None]=None, max_id:int=-1):\n with torch.no_grad():\n if isinstance(ids, 
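`initialize2` filters the knowledge-transfer distribution with a top-p (nucleus) mask before the temperature softmax, keeping the smallest set of old classes whose probability mass passes `p`, and always at least two, exactly as the inner `top_p` helper does. The same routine in isolation, runnable on random probabilities:

```python
import torch

def top_p_mask(probs, p=0.9):
    """Return a 0/-inf mask keeping the top-p classes per row (min. two)."""
    vals, idx = torch.sort(probs, descending=True, dim=1)
    mask = torch.full_like(probs, float('-inf'))
    for row in range(probs.size(0)):
        acc, n = 0.0, 0
        while acc < p or n <= 1:          # always keep at least two entries
            mask[row, idx[row, n]] = 0
            acc += vals[row, n].item()
            n += 1
    return mask

probs = torch.softmax(torch.randn(2, 6), dim=1)
# Adding the mask to the log-probabilities zeroes out the excluded classes:
masked = torch.softmax(torch.log(probs) + top_p_mask(probs, 0.9), dim=1)
print(masked)
```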
(torch.Tensor, list)):\n if torch.any(ids > self.nslots):\n warnings.warn(\"Setting features to new classes. Using 'extend' or 'append' is preferred for new classes\")\n self.classes.weight[ids] = features\n elif isinstance(ids, int):\n self.classes.weight[ids] = features\n else:\n if max_id == -1:\n raise ValueError(f\"Need input for either ids or max_id\")\n self.classes.weight[:max_id] = features\n\n def append(self, feature):\n with torch.no_grad():\n self.classes.weight[self.nslots] = feature\n self.nslots += 1\n\n def extend(self, features):\n with torch.no_grad():\n features = features.to(self.device)\n if len(features.size()) == 1:\n warnings.warn(\"Extending 1-dim feature vector. Using 'append' instead is preferred.\")\n self.append(features)\n else:\n nclasses = features.size(0)\n self.classes.weight[self.nslots:self.nslots+nclasses] = features\n self.nslots += nclasses\n\nclass BIC(LInEx):\n def __init__(self,input_dim:int,hidden_dim:int,max_slots:int,init_slots:int,device:Union[torch.device, None]=None, **kwargs)->None:\n super().__init__(input_dim,hidden_dim,max_slots,init_slots,device,**kwargs)\n self.correction_weight = nn.Parameter(torch.ones(1, dtype=torch.float, device=self.device, requires_grad=True))\n self.correction_bias = nn.Parameter(torch.zeros(1, dtype=torch.float, device=self.device, requires_grad=True))\n self.correction_stream = [init_slots]\n\n def add_stream(self, num_classes):\n self.correction_stream.append(self.correction_stream[-1]+num_classes)\n\n def forward(self, batch, nslots:int=-1, bias_correction:str=\"none\", exemplar:bool=False, exemplar_distill:bool=False, distill:bool=False, return_loss:bool=True, tau:float=1.0, log_outputs:bool=True, params=None):\n assert bias_correction in [\"none\", \"last\", \"current\"]\n if distill:\n assert bias_correction != \"current\"\n if isinstance(batch, (tuple, list)) and len(batch) == 2:\n features, labels = batch\n else:\n features, labels = batch.features, batch.labels\n inputs = self.input_map(features, params=self.get_subdict(params, \"input_map\"))\n scores = self.classes(inputs, params=self.get_subdict(params, \"classes\"))\n if nslots == -1:\n scores += self.mask\n nslots = self.nslots\n else:\n scores += self.idx_mask(max_idx=nslots)\n scores[:, 0] = 0\n if bias_correction == \"current\":\n assert len(self.correction_stream) >= 2\n scores[:, self.correction_stream[-2]:self.correction_stream[-1]] *= self.correction_weight\n scores[:, self.correction_stream[-2]:self.correction_stream[-1]] += self.correction_bias\n if scores.size(0) != labels.size(0):\n assert scores.size(0) % labels.size(0) == 0\n labels = labels.repeat_interleave(scores.size(0) // labels.size(0), dim=0)\n else:\n labels = labels\n if log_outputs:\n pred = torch.argmax(scores, dim=1)\n acc = torch.mean((pred == labels).float())\n self.outputs[\"accuracy\"] = acc.item()\n self.outputs[\"prediction\"] = pred.detach().cpu()\n self.outputs[\"label\"] = labels.detach().cpu()\n self.outputs[\"input_features\"] = features.detach().cpu()\n self.outputs[\"encoded_features\"] = inputs.detach().cpu()\n if return_loss:\n labels.masked_fill_(labels >= nslots, 0)\n valid = labels < nslots\n nvalid = torch.sum(valid.float())\n if nvalid == 0:\n loss = 0\n else:\n loss = self.crit(scores[valid], labels[valid])\n if distill and self.history is not None:\n old_scores = self.forward(batch, nslots=self.history[\"nslots\"], return_loss=False, log_outputs=False, params=self.history[\"params\"]).detach()\n if bias_correction == \"last\":\n old_scores[:, 
self.correction_stream[-2]:self.correction_stream[-1]] *= self.history['correction_weight']\n old_scores[:, self.correction_stream[-2]:self.correction_stream[-1]] += self.history['correction_bias']\n new_scores = scores[:, :self.history[\"nslots\"]]\n loss_distill = - torch.sum(torch.softmax(old_scores*tau, dim=1) * torch.log_softmax(new_scores*tau, dim=1), dim=1).mean()\n d_weight = self.history[\"nslots\"]\n c_weight = (self.nslots - self.history[\"nslots\"])\n loss = ( d_weight * loss_distill+ c_weight* loss) / (d_weight+c_weight)\n if exemplar and self.exemplar_features is not None:\n if self.exemplar_features.size(0) < 128:\n exemplar_inputs = self.input_map(self.exemplar_features.to(self.device), params=self.get_subdict(params, \"input_map\"))\n exemplar_scores = self.classes(exemplar_inputs, params=self.get_subdict(params, \"classes\"))\n else:\n exemplar_scores = []\n for _beg in range(0, self.exemplar_features.size(0), 128):\n _features = self.exemplar_features[_beg:_beg+128, :]\n _inputs = self.input_map(_features.to(self.device), params=self.get_subdict(params, \"input_map\"))\n exemplar_scores.append(self.classes(_inputs, params=self.get_subdict(params, \"classes\")))\n exemplar_scores = torch.cat(exemplar_scores, dim=0)\n exemplar_scores[:, 0] = 0.\n loss_exemplar = self.crit(exemplar_scores+self.mask, self.exemplar_labels.to(self.device))\n if exemplar_distill:\n if self.exemplar_features.size(0) < 128:\n exemplar_old_inputs = self.input_map(self.exemplar_features.to(self.device), params=self.get_subdict(self.history[\"params\"], \"input_map\"))\n exemplar_old_scores = self.classes(exemplar_old_inputs, params=self.get_subdict(self.history[\"params\"], \"classes\"))\n else:\n exemplar_old_scores = []\n for _beg in range(0, self.exemplar_features.size(0), 128):\n _features = self.exemplar_features[_beg:_beg+128, :]\n _inputs = self.input_map(_features.to(self.device), params=self.get_subdict(self.history[\"params\"], \"input_map\"))\n exemplar_old_scores.append(self.classes(_inputs, params=self.get_subdict(self.history[\"params\"], \"classes\")))\n exemplar_old_scores = torch.cat(exemplar_old_scores, dim=0)\n exemplar_old_scores[:, 0] = 0.\n if bias_correction == \"last\":\n exemplar_old_scores[:, self.correction_stream[-2]:self.correction_stream[-1]] *= self.history['correction_weight']\n exemplar_old_scores[:, self.correction_stream[-2]:self.correction_stream[-1]] += self.history['correction_bias']\n exemplar_old_scores = exemplar_old_scores[:self.history[\"nslots\"]]\n loss_exemplar_distill = - torch.sum(torch.softmax(exemplar_old_scores[:self.history[\"nslots\"]]*tau, dim=1) * torch.log_softmax(exemplar_scores[:self.history[\"nslots\"]], dim=1), dim=1).mean()\n d_weight = self.history[\"nslots\"]\n c_weight = (self.nslots - self.history[\"nslots\"])\n loss_exemplar = (d_weight * loss_exemplar_distill+ c_weight* loss_exemplar) / (d_weight+c_weight)\n e_weight = self.exemplar_features.size(0)\n loss = (nvalid * loss + e_weight * loss_exemplar) / (nvalid + e_weight)\n if torch.isnan(loss):\n print(loss, loss_exemplar)\n return loss\n else:\n return scores[:, :nslots]\n\n def forward_correction(self, *args, **kwargs):\n '''\n training:\n entropy: normal\n distill:\n old, last\n Fold, Fold * correction_weight + correction_bias,\n '''\n if len(args) >= 3:\n args[2] = \"current\"\n else:\n kwargs[\"bias_correction\"] = \"current\"\n return self.forward(*args,**kwargs)\n\n def set_history(self):\n super().set_history()\n self.history[\"correction_weight\"] = 
self.correction_weight.item()\n self.history[\"correction_bias\"] = self.correction_bias.item()\n\n def score(self, *args, **kwargs):\n if len(self.correction_stream) >= 2:\n return self.forward_correction(*args, **kwargs)\n else:\n if len(args) >= 3:\n args[2] = \"none\"\n else:\n kwargs[\"bias_correction\"] = \"none\"\n return self.forward(*args, **kwargs)\n\nclass ICARL(LInEx):\n def __init__(self,input_dim:int,hidden_dim:int,max_slots:int,init_slots:int,device:Union[torch.device, None]=None, **kwargs)->None:\n super().__init__(input_dim,hidden_dim,max_slots,init_slots,device,**kwargs)\n self.none_feat = None\n \n def set_none_feat(self, dataloader, params=None):\n self.eval()\n with torch.no_grad():\n ifeat = []; ofeat = []; label = []\n num_batches = len(dataloader)\n for batch in tqdm(dataloader, \"collecting exemplar\"):\n batch = batch.to(self.device)\n loss = self.forward(batch, params=params)\n ifeat.append(self.outputs[\"input_features\"])\n ofeat.append(self.outputs[\"encoded_features\"])\n label.append(self.outputs[\"label\"])\n ifeat = torch.cat(ifeat, dim=0)\n ofeat = torch.cat(ofeat, dim=0)\n label = torch.cat(label, dim=0)\n nslots = max(self.nslots, torch.max(label).item()+1)\n exemplar = {}\n idx = (label == 0)\n self.none_feat = ofeat[idx].mean(dim=0).cpu()\n return self.none_feat\n\n def score(self, batch, exemplar=None, params=None):\n if exemplar is None:\n exemplar_labels, exemplar_features = self.exemplar_labels, self.exemplar_features\n else:\n exemplar_labels, exemplar_features = exemplar\n\n inputs = self.input_map(batch.features, params=self.get_subdict(params, \"input_map\"))\n scores = []\n scores.append(- torch.sum((inputs - self.none_feat.to(inputs.device).unsqueeze(0))**2, dim=1))\n for i in range(1, self.nslots):\n label_idx = (exemplar_labels == i)\n label_features = exemplar_features[label_idx]\n label_inputs = self.input_map(label_features.to(inputs.device), params=self.get_subdict(params, \"input_map\")).mean(dim=0, keepdim=True)\n scores.append(- torch.sum((inputs - label_inputs)**2, dim=1))\n scores = torch.stack(scores, dim=0).transpose(0, 1)\n labels = batch.labels\n if scores.size(0) != labels.size(0):\n assert scores.size(0) % labels.size(0) == 0\n labels = labels.repeat_interleave(scores.size(0) // labels.size(0), dim=0)\n pred = torch.argmax(scores, dim=1)\n acc = torch.mean((pred == labels).float())\n labels.masked_fill_(labels >= self.nslots, 0)\n valid = labels < self.nslots\n nvalid = torch.sum(valid.float())\n if nvalid == 0:\n loss = 0\n else:\n loss = self.crit(scores[valid], labels[valid])\n self.outputs[\"accuracy\"] = acc.item()\n self.outputs[\"prediction\"] = pred.detach().cpu()\n self.outputs[\"label\"] = labels.detach().cpu()\n self.outputs[\"input_features\"] = batch.features.detach().cpu()\n self.outputs[\"encoded_features\"] = inputs.detach().cpu()\n return loss\n\ndef test(): # sanity check\n m = LInEx(nhead=8,nlayers=3,hidden_dim=512,input_dim=2048,max_slots=30,init_slots=9,device=torch.device(\"cpu\"))\n\nif __name__ == \"__main__\":\n test()\n","sub_path":"models/nets.py","file_name":"nets.py","file_ext":"py","file_size_in_byte":32092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"428854241","text":"from functools import wraps\nfrom flask import request, make_response\nfrom flask import Flask\nfrom flask import jsonify\nfrom awake import wol\nimport os\nimport socket\n\napp = Flask(__name__)\nVALID_TOKEN = os.environ['HOME_TOKEN']\nCERT = 
'/ssl/home.ahands.org/fullchain.pem'\nKEY = '/ssl/home.ahands.org/privkey.pem'\n\ndef token_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if 'token' not in request.args:\n return make_response(jsonify({ 'msg': 'Unauthenticated, no token provided', 'status': '401' }), 401)\n if VALID_TOKEN != request.args['token']:\n return make_response(jsonify({ 'msg': 'Unauthorized, invalid token', 'status': '403' }), 403)\n return f(*args, **kwargs)\n return decorated_function\n\n@app.route('/wake_cosmo')\n@token_required\ndef wake_cosmo():\n wol.send_magic_packet('D8:CB:8A:39:E2:BF')\n return jsonify({ \"action\": \"woke cosmo\" })\n\n@app.route('/cosmo_status')\n@token_required\ndef cosmo_status():\n s = socket.socket()\n s.settimeout(0.100)\n\n try:\n s.connect(('cosmo.lan', 22))\n except Exception as e:\n return jsonify({ \"status\": \"down\" })\n finally:\n s.close()\n return jsonify({ \"status\": \"up\" })\n\napp.run(host='0.0.0.0', ssl_context=(CERT, KEY))\n","sub_path":"pub/src/daemon.py","file_name":"daemon.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"236887527","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 2 15:26:37 2019\n\n@author: Yung-Yu Tsai\n\nAn example of distributed convolution layer\n\"\"\"\n\n# setup\n\nimport keras\nimport numpy as np\nimport keras.backend as K\nimport time\n\nfrom simulator.utils_tool.weight_conversion import convert_original_weight_layer_name\nfrom simulator.utils_tool.dataset_setup import dataset_setup\nfrom simulator.utils_tool.confusion_matrix import show_confusion_matrix\nfrom simulator.metrics.topk_metrics import top2_acc\nfrom simulator.approximation.estimate import comp_num_estimate\n\nfrom keras.models import Model\nfrom keras.layers import Activation, Input, MaxPooling2D, Add\nfrom simulator.layers.quantized_layers import QuantizedConv2D, QuantizedDense, QuantizedFlatten, QuantizedDistributedConv2D\nfrom simulator.layers.quantized_ops import quantizer,build_layer_quantizer\nfrom simulator.models.model_library import quantized_4C2F\nfrom simulator.models.model_mods import exchange_distributed_conv\n\n#%%\n# model setup\n\nweight_name='../mnist_lenet5_weight.h5'\nbatch_size=25\n\ndef quantized_lenet5(nbits=8, fbits=4, rounding_method='nearest', input_shape=(28,28,1), num_classes=10, batch_size=None, ifmap_fault_dict_list=None, ofmap_fault_dict_list=None, weight_fault_dict_list=None, quant_mode='hybrid', overflow_mode=False, stop_gradient=False,):\n \n print('\\nBuilding model : Quantized Lenet 5')\n \n layer_quantizer=build_layer_quantizer(nbits,fbits,rounding_method,overflow_mode,stop_gradient)\n \n if ifmap_fault_dict_list is None:\n ifmap_fault_dict_list=[None for i in range(8)]\n else:\n print('Inject input fault')\n if ofmap_fault_dict_list is None:\n ofmap_fault_dict_list=[None for i in range(8)]\n else:\n print('Inject output fault')\n if weight_fault_dict_list is None:\n weight_fault_dict_list=[[None,None] for i in range(8)]\n else:\n print('Inject weight fault')\n \n print('Building Layer 0')\n input_shape = Input(shape=input_shape, batch_shape=(batch_size,)+input_shape)\n print('Building Layer 1')\n x = QuantizedConv2D(filters=16,\n quantizers=layer_quantizer,\n kernel_size=(5,5),\n padding='same',\n strides=(1, 1), \n activation='relu',\n ifmap_sa_fault_injection=ifmap_fault_dict_list[1],\n ofmap_sa_fault_injection=ofmap_fault_dict_list[1],\n weight_sa_fault_injection=weight_fault_dict_list[1],\n
quant_mode=quant_mode)(input_shape)\n print('Building Layer 2')\n x = MaxPooling2D(pool_size=(2,2))(x)\n print('Building Layer 3')\n x = QuantizedDistributedConv2D(filters=36,\n split_type=['channel','k_height','k_width'],\n splits=[[8,4,4],[2,3],5],\n quantizers=layer_quantizer,\n kernel_size=(5,5),\n padding='same',\n strides=(1, 1),\n ifmap_sa_fault_injection=ifmap_fault_dict_list[3],\n ofmap_sa_fault_injection=ofmap_fault_dict_list[3],\n weight_sa_fault_injection=weight_fault_dict_list[3],\n quant_mode=quant_mode)(x)\n print('Building Layer 4')\n x = Add()(x)\n print('Building Layer 5')\n x = Activation('relu')(x)\n\n print('Building Layer 6')\n x = MaxPooling2D(pool_size=(2,2))(x)\n print('Building Layer 7')\n x = QuantizedFlatten()(x)\n print('Building Layer 8')\n x = QuantizedDense(128,\n quantizers=layer_quantizer,\n activation='relu',\n ifmap_sa_fault_injection=ifmap_fault_dict_list[6],\n ofmap_sa_fault_injection=ofmap_fault_dict_list[6],\n weight_sa_fault_injection=weight_fault_dict_list[6],\n quant_mode=quant_mode)(x)\n print('Building Layer 9')\n x = QuantizedDense(num_classes,\n quantizers=layer_quantizer,\n activation='softmax',\n ifmap_sa_fault_injection=ifmap_fault_dict_list[7],\n ofmap_sa_fault_injection=ofmap_fault_dict_list[7],\n weight_sa_fault_injection=weight_fault_dict_list[7],\n quant_mode=quant_mode)(x)\n\n model=Model(inputs=input_shape, outputs=x)\n\n return model\n\n\nmodel=quantized_lenet5(nbits=8,fbits=3,rounding_method='nearest',batch_size=batch_size,quant_mode='hybrid')\n\nweight_name=convert_original_weight_layer_name(weight_name)\nmodel.load_weights(weight_name)\nprint('orginal weight loaded')\n\nmodel.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy',top2_acc])\n\n\n\n#%%\n#dataset setup\n\nx_train, x_test, y_train, y_test, class_indices, datagen, input_shape = dataset_setup('mnist')\n\n#%%\n# view test result\nt = time.time()\n\ntest_result = model.evaluate(x_test, y_test, verbose=1, batch_size=batch_size)\n\nt = time.time()-t\nprint('\\nruntime: %f s'%t)\nprint('\\nTest loss:', test_result[0])\nprint('Test top1 accuracy:', test_result[1])\nprint('Test top2 accuracy:', test_result[2])\n\n#computaion_esti=comp_num_estimate(model)\n#print('\\nTotal # of computations:', computaion_esti['total_MAC'])\n#print('Total # of MAC bits:', computaion_esti['total_MAC_bits'])\n\n#%%\n# draw confusion matrix\n\nprint('\\n')\nprediction = model.predict(x_test, verbose=1,batch_size=batch_size)\nprediction = np.argmax(prediction, axis=1)\n\nshow_confusion_matrix(np.argmax(y_test, axis=1),prediction,class_indices,'Confusion Matrix',normalize=False)\n\n#%%\nK.clear_session()\n\n#%%\n\nweight_name='../cifar10_4C2FBN_weight_fused_BN.h5'\nbatch_size=25\n\nt = time.time()\nmodel=quantized_4C2F(nbits=12,\n fbits=6,\n rounding_method='nearest',\n batch_size=batch_size,\n quant_mode=None,)\n\nsplit_type=['channel','k_height','k_width']\ntarget_layers=[2,6]\nsplits=[[[11,11,10],[2,1],3 ],\n [2 ,3 ,[1,2]]]\n\nmodel=exchange_distributed_conv(model,target_layers,True,split_type,splits)\n\nt = time.time()-t\n\nprint('\\nModel build time: %f s'%t)\n\nprint('Model compiling...')\nmodel.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy',top2_acc])\nprint('Model compiled !')\nmodel.load_weights(weight_name)\nprint('orginal weight loaded')\n\n#%%\n#dataset setup\n\nx_train, x_test, y_train, y_test, class_indices, datagen, input_shape = dataset_setup('cifar10')\n\n#%%\n# view test result\n\nt = time.time()\n\ntest_result = 
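The model above splits one convolution across channel and kernel dimensions with `QuantizedDistributedConv2D` and recombines the partial results with a plain `Add()`, which is valid because convolution is linear in its inputs and weights. A dependency-light numerical check of the channel-split case, using a naive numpy correlation rather than Keras:

```python
import numpy as np

def conv_valid(x, w):
    """x: (H, W, Cin), w: (kh, kw, Cin) -> single-channel valid correlation."""
    kh, kw, _ = w.shape
    H, W = x.shape[0] - kh + 1, x.shape[1] - kw + 1
    out = np.zeros((H, W))
    for i in range(H):
        for j in range(W):
            out[i, j] = (x[i:i+kh, j:j+kw] * w).sum()
    return out

rng = np.random.default_rng(0)
x = rng.standard_normal((8, 8, 6))
w = rng.standard_normal((3, 3, 6))

full = conv_valid(x, w)
# Split the input channels into two groups of 3 and add the partial results:
split = conv_valid(x[:, :, :3], w[:, :, :3]) + conv_valid(x[:, :, 3:], w[:, :, 3:])
print(np.allclose(full, split))   # True: Add() reassembles the convolution
```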
model.evaluate(x_test, y_test, verbose=1, batch_size=batch_size)\n\nt = time.time()-t\n \nprint('\\nruntime: %f s'%t)\nprint('\\nTest loss:', test_result[0])\nprint('Test top1 accuracy:', test_result[1])\nprint('Test top2 accuracy:', test_result[2])\n\n#%%\n# draw confusion matrix\n\nprint('\\n')\nprediction = model.predict(x_test, verbose=1, batch_size=batch_size)\nprediction = np.argmax(prediction, axis=1)\n\nshow_confusion_matrix(np.argmax(y_test, axis=1),prediction,class_indices,'Confusion Matrix',figsize=(8,6),normalize=False)\n\n\n\n","sub_path":"example_distributed_conv.py","file_name":"example_distributed_conv.py","file_ext":"py","file_size_in_byte":7280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"36225857","text":"from django.conf import settings\nfrom django.utils.html import remove_tags\n\nimport pytils\nimport textile\n\n# get languages list from settings\n# if has_attr(\"descr_lng\")\n# set textile to that\n\nCUT_IND = \"++cut++\"\nif hasattr(settings, \"CUT_IND\"):\n CUT_IND = settings.CUT_IND\n\n\ndef textile_signal(sender, instance, **kwargs):\n setattr(instance, \"text\", textile.textile(getattr(instance, \"text_raw\")))\n for l in settings.LANGUAGES:\n if hasattr(instance, \"text_\" + l[0]):\n if instance.use_textile:\n setattr(instance, \"text_\" + l[0],\n textile.textile(\n getattr(instance,\n \"text_raw_\" + l[0])))\n else:\n setattr(instance, \"text_\" +\n l[0], getattr(instance, \"text_raw_\" + l[0]))\n\n\ndef slugify_signal(sender, instance, **kwargs):\n if not instance.slug:\n instance.slug = pytils.translit.slugify(instance.name)\n\n\ndef cut_signal(sender, instance, **kwargs):\n instance.cutted_text = remove_tags(\n instance.text, \"h1 h2 h3 h4 h5 h6 p div script style\")\n instance.text = instance.text.replace(CUT_IND, \"\")\n a = instance.cutted_text.find(CUT_IND)\n if a != -1:\n instance.cutted_text = instance.cutted_text[:a]\n for l in settings.LANGUAGES:\n if hasattr(instance, \"cutted_text_\" + l[0]) and hasattr(\n instance, \"text_\" + l[0]):\n setattr(instance, \"cutted_text_\" + l[0], remove_tags(\n getattr(instance, \"text_\" + l[0]),\n \"h1 h2 h3 h4 h5 h6 p div script style\"))\n setattr(instance, \"text_\" +\n l[0], getattr(instance, \"text_\" + l[0])\n .replace(CUT_IND, \"\"))\n a = getattr(instance, \"cutted_text_\" + l[0]).find(CUT_IND)\n if a != -1:\n setattr(instance, \"cutted_text_\" +\n l[0], getattr(instance, \"cutted_text_\" + l[0])[:a])\n","sub_path":"signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"412398620","text":"from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nfrom operator import itemgetter\nfrom youtube_dl import YoutubeDL\nimport pyexcel\n\nurl = \"http://s.cafef.vn/bao-cao-tai-chinh/VNM/IncSta/2017/3/0/0/ket-qua-hoat-dong-kinh-doanh-cong-ty-co-phan-sua-viet-nam.chn\"\n\nconn = urlopen(url)\ncontent = conn.read().decode('utf-8')\n\nsoup = BeautifulSoup(content, 'html.parser')\n\nlist_of_content = []\n\n# Find bold line items\nline_item_bold = soup.find_all('td', style = \"width:32%;color:#014377;font-weight:bold;\")\nfor li in line_item_bold:\n dic = {}\n \n li = li.string.strip().split(maxsplit = 1)\n li_key = float(li[0].strip('\\''))\n dic['index'] = li_key\n\n li_value = li[1].strip('\\'')\n dic['line item'] = li_value\n list_of_content.append(dic)\n\n# Find values for bold line items\nvalue_bold = soup.find_all('td', 
style = \"width:15%;padding:4px;color:#014377;font-weight:bold;\")\n\ncount = 0\nfor l in list_of_content:\n for i in range(4):\n l['Q' + str(i + 1)] = value_bold[count].string\n count += 1\n\nlist_temp = []\n\n# Find normal line items\nline_item = soup.find_all('td', style = \"width:32%;color:#014377;\")\nfor li in line_item:\n dic = {}\n \n li = li.string.strip().split(maxsplit = 1)\n li_key = float(li[0].strip('\\''))\n dic['index'] = li_key\n\n li_value = li[1].strip('\\'')\n dic['line item'] = li_value\n list_temp.append(dic)\n \n#Find values for normal line items\nvalue = soup.find_all('td', style = \"width:15%;padding:4px;color:#014377;\")\n\ncount = 0\nfor l in list_temp:\n for i in range(4):\n l['Q' + str(i + 1)] = value[count].string\n count += 1\n\n# Merge 2 lists\nlist_of_content = list_of_content + list_temp\n\nlist_of_content = sorted(list_of_content, key = itemgetter('index'))\n\npyexcel.save_as(records = list_of_content, dest_file_name = 'bctc.xlsx')","sub_path":"Lab02/HW/cafefvn_v2.py","file_name":"cafefvn_v2.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"463520558","text":"from flask import Flask\nfrom cloudinary import uploader\nfrom rasberry.switch import Camera\nimport cloudinary\nimport dotenv\nimport json\nimport time\n\ndotenv.load()\napp = Flask(__name__)\n\ncloudinary.config(\n cloud_name=dotenv.get('cloud_name'),\n api_key=dotenv.get('api_key'),\n api_secret=dotenv.get('api_secret')\n)\n\nPI_SIGNALS = {\n \"0\": \"Close\",\n \"1\": \"Open\"\n}\n\nLOCKED_OR_UNLOCKED = {\n \"0\": \"Magnetized\",\n \"1\": \"Demagnetized\"\n}\n\n# piSignalLocked\nMESSAGE_STATUS = {\n \"00\": \"Closed and Magnetized, red or locked\",\n \"01\": \"Closed and Demagnetized, Error\",\n \"10\": \"Opened and Magnetized, amber\",\n \"11\": \"Opened and Demagnetized, green\"\n}\n\n\nclass Simian():\n\n def __init__(self, defaultState=0):\n self.state = defaultState\n\n def set_state(self, newState):\n if newState in [0,1]:\n self.state = newState\n else:\n print(\"Error: State can only be 0 0r 1\")\n\n def get_state(self):\n return self.state\n\nclass RasberryPi():\n\n def __init__(self, simian, camera, doorStatus=0):\n self.boundSimian = simian\n self.boundCam = camera\n self.doorStatus = doorStatus\n\n def get_door_status(self):\n return self.doorStatus\n\n def set_door_status(self, newDoorStatus):\n self.doorStatus = newDoorStatus\n\n def snap(self):\n return self.boundCam.snap()\n\n def action(self, result, image=False):\n if int(result):\n self.set_door_status(1) # communicate with api to let know state is changed\n return \"open door\"\n else:\n return \"remain locked\"\n\n \n def send_mail(self, image_url):\n return \"mail sent\"\n\n def button_press(self):\n # function that communicates with camera to take a picture\n # give a timer buffer\n image_dir = self.snap()\n image_info = uploader.upload(image_dir)\n self.send_mail(image_info[\"secure_url\"])\n return image_info[\"secure_url\"]\n\nsimian = Simian()\ncam = Camera()\npi = RasberryPi(simian, cam)\n\n@app.route('/')\ndef api_root():\n person_at_the_door = pi.button_press()\n return 'Welcome {}'.format(person_at_the_door)\n\n@app.route('/api/state/', methods = ['GET'])\ndef api_state():\n return json.dumps({\n 'message':MESSAGE_STATUS[str(pi.get_door_status())+str(simian.get_state())],\n 'state': simian.get_state()\n })\n\n@app.route('/api/door/close', methods = ['POST'])\ndef api_door_close(state):\n return 
pi.action(\"0\")\n\n@app.route('/api/door/open', methods = ['POST'])\ndef api_door_open(state):\n return pi.action(\"1\")\n\n@app.route('/api/cloudinary')\ndef api_cloudinary():\n return cloud.get_link()\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"backend/simian_app.py","file_name":"simian_app.py","file_ext":"py","file_size_in_byte":2707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"418617692","text":"import json\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom poloniex_dao import PoloniexDAO\n\n\n\ndef discreteDataset(symbol, price_change_threshold):\n events = loadEvents(symbol)\n events = thresholdEvents(events, price_change_threshold)\n return splitTrainTest(events,0.7)\n\n\ndef bufferedDataset(symbol, price_change_threshold, buffer_len = 3):\n events = loadEvents(symbol)\n events = thresholdEvents(events, price_change_threshold)\n return splitBufferedTrainTest(events, 0.7)\n\n\ndef thresholdEvents(events, price_change_threshold):\n results = []\n i = 0\n while i < len(events) - 1:\n start_price = events[i][1]\n sell_count = events[i][2]\n sell_vol = events[i][3]\n buy_count = events[i][4]\n buy_vol = events[i][5]\n j = i + 1\n while j < len(events) and abs( events[j][1] - start_price ) < ( start_price * price_change_threshold):\n sell_count += events[j][2]\n sell_vol += events[j][2]\n buy_count += events[j][4]\n buy_vol += events[j][4]\n j = j + 1\n end_time = events[j-1][0]\n end_price = events[j-1][1]\n results.append( [ end_time, end_price, sell_count, sell_vol, buy_count, buy_vol ] )\n i = j\n return results\n\ndef splitTrainTest(events, percent=0.7):\n n_tr = int(len(events)*percent)\n # Training\n X_tr = []\n Y_tr = []\n for i in range(0, n_tr ):\n X_tr.append( events[i] )\n Y_tr.append( events[i+1][1] - events[i][1] )\n # Testing\n X_ts = []\n Y_ts = []\n for i in range(n_tr , len(events) -1 ):\n X_ts.append( events[i] )\n Y_ts.append( events[i+1][1] - events[i][1] )\n\n return X_tr, Y_tr, X_ts, Y_ts\n\ndef splitBufferedTrainTest(events, percent=0.7):\n n_tr = int(len(events)*percent)\n\n nb_buffer = len(events[0])\n # Training\n X_tr = []\n Y_tr = []\n for i in range(0, n_tr ):\n X_tr.append( events[i] )\n Y_tr.append( events[i+1][nb_buffer][1] - events[i][nb_buffer][1] )\n # Testing\n X_ts = []\n Y_ts = []\n for i in range(n_tr , len(events) -1 ):\n X_ts.append( events[i] )\n Y_ts.append( events[i+1][nb_buffer][1] - events[i][nb_buffer][1] )\n\n return X_tr, Y_tr, X_ts, Y_ts\n\ndef buffer( events, buffer ):\n result = []\n for i in range( len(events) - buffer - 1 ):\n result.append( X[ i:(i+buffer) ] )\n return events\n\ndef loadEvents(symbol):\n config = {}\n with open('../config.json', 'r') as f:\n config = json.load(f)\n symbols = config[\"symbols\"]\n timestep = config[\"timestep\"]\n host = config[\"host\"]\n name = config[\"name\"]\n user = config[\"user\"]\n pswd = config[\"pswd\"]\n dao = PoloniexDAO(host, name, user, pswd)\n return dao.allEvents(symbol)\n","sub_path":"src/poloniex_datasets.py","file_name":"poloniex_datasets.py","file_ext":"py","file_size_in_byte":2770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"263515400","text":"'''\n구구단 프로그램 짜보기\n함수 이름:GuGu\n입력받는값:구구단 몇단인지\n출력하는값:입력받은 구구단 단의 결과\n결과를 저장할 자료형:리스트\n'''\ndef gugu(dan):\n dan_result = []\n for i in range(1, 10):\n dan_result.append(dan*i)\n return dan_result\n\nprint(gugu(2))","sub_path":"python/6. 
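\n# A quick self-check of thresholdEvents on synthetic rows (illustrative only;\n# the row layout [time, price, sell_count, sell_vol, buy_count, buy_vol] follows\n# the indexing above, and the 5% threshold is an assumption for this demo):\nif __name__ == '__main__':\n    demo_events = [\n        [0, 100.0, 1, 10.0, 1, 10.0],\n        [1, 101.0, 2, 20.0, 2, 20.0],  # within 5% of 100.0, merged into the first bucket\n        [2, 106.0, 1, 5.0, 1, 5.0],    # breaks the band, starts a new bucket\n        [3, 107.0, 1, 5.0, 1, 5.0],\n    ]\n    # expected: [[1, 101.0, 3, 30.0, 3, 30.0], [3, 107.0, 2, 10.0, 2, 10.0]]\n    print(thresholdEvents(demo_events, 0.05))\n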
","sub_path":"src/poloniex_datasets.py","file_name":"poloniex_datasets.py","file_ext":"py","file_size_in_byte":2770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"263515400","text":"'''\nWriting a gugudan (times-table) program\nFunction name: GuGu\nInput: which times table to produce\nOutput: the results of the requested times table\nData type used to store the result: list\n'''\ndef gugu(dan):\n    dan_result = []\n    for i in range(1, 10):\n        dan_result.append(dan*i)\n    return dan_result\n\nprint(gugu(2))","sub_path":"python/6. 파이썬 프로그래밍, 어떻게 시작해야 할까/gugu.py","file_name":"gugu.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"98889120","text":"# -*- coding: UTF-8 -*-\n\"\"\"\nAuthor: Danry Li\n\nMain program: predicting second-hand apartment prices in Chongqing\n\n\"\"\"\nfrom house_price_prediction.fetch_data import main as fetch_house_data\nfrom MapCrawler.TransitMatrix import main as get_efficiency\nfrom house_price_prediction.integrate_data import main as integrate_efficiency\nfrom house_price_prediction.organize_data import main as organize_data\nfrom house_price_prediction.train_model import ML_Model\n\n\nif __name__ == \"__main__\":\n\n    # Scrape second-hand housing listings from the Anjuke website and save them as a CSV file\n    fetch_house_data()\n\n    # Use the Baidu Maps API to compute a commuting-efficiency score for each\n    # listing's location, and merge it into the listing data\n    baiduAPI_ak = ''\n    get_efficiency(ak=baiduAPI_ak)\n    integrate_efficiency(ak=baiduAPI_ak)\n\n    # Clean, prune and restructure the Anjuke listing data for the later neural-network training.\n    organize_data()\n\n    # Build a neural-network regression model of price against the listing\n    # features, train it, and plot predicted vs. actual prices as a scatter plot\n    model = ML_Model()\n    params = {\n        \"lr\": 0.0001,\n        \"weight_decay\": 1324,\n        \"batch_size\": 64,\n    }\n    model.train_model(5, 500, **params)\n","sub_path":"HousePricePredict.py","file_name":"HousePricePredict.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"115173530","text":"import pygame\r\n\r\nsheet = pygame.image.load('res/sheet.png')\r\nscale = 2\r\nimgs = {}\r\n\r\nwith open('res/sheet map.txt') as sheet_map:\r\n\tfor line in sheet_map:\r\n\t\tif line[0] in ('#', '\\n'):\r\n\t\t\tcontinue\r\n\r\n\t\tline = line.split(' ')\r\n\t\tname = line[0]\r\n\t\tx = int(line[1])\r\n\t\ty = int(line[2])\r\n\t\tw = int(line[3])\r\n\t\th = int(line[4])\r\n\r\n\t\timg = sheet.subsurface((x, y, w, h))\r\n\t\timg = pygame.transform.scale(img, (int(w*scale), int(h*scale)))\r\n\t\timgs[name] = img\r\n\r\ndef get(name):\r\n\treturn imgs[name]\r\n\r\n\t","sub_path":"games/gamejam/sheet.py","file_name":"sheet.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"185866248","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May  1 17:55:09 2018\n\n@author: joans\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pickle\nfrom functools import reduce\nimport os\n\nfrom shapely.geometry import Polygon, Point # MultiPolygon, \n\nfrom unosat_annotation import UnosatAnnotation\nfrom damage import Damage\nfrom unosat_annotation import Damage_Levels\nfrom sample import Sample, SampleShower\n\n\n\"\"\"\nTODO\n\"\"\"\nclass SplitNSites():\n    def __init__(self, sites_train, sites_test, damage_levels,\n                 negs_per_pos, pre_and_post, patch_size,\n                 min_distance_pos_neg):\n        for dl in damage_levels:\n            assert dl in Damage_Levels[:-1] # exclude 'No visible damage'\n\n        assert negs_per_pos > 0.0\n        assert patch_size % 2 == 0\n        assert min_distance_pos_neg >= patch_size\n        \n        self.sites_train = sites_train\n        self.sites_test = sites_test\n        self.damage_levels = damage_levels\n        self.negs_per_pos = negs_per_pos\n        self.pre_and_post = pre_and_post\n        self.patch_size = patch_size\n        self.min_distance_pos_neg = min_distance_pos_neg \n        self.half_size = int(self.patch_size/2.0)\n\n        self.damages_train, self.damages_test = self._make_damages()\n        self.positives_train, self.negatives_train, \\\n            self.positives_test, self.negatives_test = \\\n            self._make_positives_and_negatives_all_damages()\n        \n\n    def _make_positives_and_negatives_all_damages(self):\n        
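# one pass over train + test sites: the first len(self.damages_train)\n        # entries fill the train lists, the remaining ones the test lists\n        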
positives_train = []\n        negatives_train = []\n        positives_test = []\n        negatives_test = []\n        num_damages_train = len(self.damages_train)\n        num_damages_test = len(self.damages_test)\n        offset = 0\n        for nd, dam in enumerate(self.damages_train + self.damages_test):\n            all_pairs = (nd < num_damages_train) and self.pre_and_post\n            if all_pairs:\n                pos = self._make_positives_all_pairs(dam)\n            else:\n                pos = self._make_positives(dam)\n            neg = self._make_negatives(dam, pos, all_pairs)\n            self._check_no_overlaping_pos_neg(pos, neg)\n            # shift the per-site dataset indices so they index the global image list\n            for s in pos + neg:\n                s.indices_datasets = [idx + offset for idx in s.indices_datasets]\n            if nd < num_damages_train:\n                positives_train += pos\n                negatives_train += neg\n            else:\n                positives_test += pos\n                negatives_test += neg\n            offset += dam.num_classifications + 1\n\n        return positives_train, negatives_train, positives_test, negatives_test\n\n\n    def _make_damages(self):\n        damages_train = [Damage(site) for site in self.sites_train]\n        damages_test = [Damage(site) for site in self.sites_test]\n        return damages_train, damages_test\n\n\n    def _make_positives(self, damage):\n        \"\"\" A positive sample is a point with some damage level at one of the\n        classifications 1...n; when pre_and_post it is paired with the pre-image 0\n        (where there is no destruction at all of any level). This is what we call the \"[0,k] pairs\". \"\"\"\n        pos = [] # list of Sample objects\n        for dam_lev in self.damage_levels:\n            print('Damage level {}'.format(dam_lev))\n            for clf in range(1, damage.num_classifications+1):\n                new_pos = []\n                rows_cols = damage.get_rows_cols(clf, dam_lev)\n                for row, col in rows_cols:\n                    if self._is_patch_inside_image(row, col, damage):\n                        if not self.pre_and_post:\n                            s = Sample(dam_lev, row, col, self.patch_size, [clf])\n                        else:\n                            s = Sample(dam_lev, row, col, self.patch_size, [0, clf])\n                        new_pos.append(s)\n                \n                print('classification {} : made {} positives'\\\n                    .format(clf, len(new_pos)))\n\n                pos.extend(new_pos) \n\n        return pos\n    \n\n\n    def _make_positives_all_pairs(self, damage):\n        \"\"\"\n        Now a sample is a change from no damage to some level of damage\n        and we will take all possible triplets (point, image1, image2)\n        for which this happens, with image1 including the pre-image : if\n        we have images 0=Pre, 1, 2, ... k ...n and point p has some\n        damage level *and* state='New - damage' at image/classification k,\n        then such triplets are (p, l, m) for l=0...k-1, m=k...n :\n        - (p, 0, k), (p, 1, k) ... (p, k-1, k) because from 0...k-1 p is\n          'no damage' and at k is (some) new damage\n        - (p, 0, k+1), (p, 1, k+1), ... (p, k-1, k+1) because from 0...k-1\n          p is 'no damage' and in k+1 is no change\n        - (p, 0, k+2), (p, 1, k+2), ... (p, k-1, k+2) same but in k+2...\n        - (p, 0, n), (p, 1, n), ... (p, k-1, n) same but in n\n        We are assuming there is no increase or decrease of damage level,\n        once changed from no to some level, it's static\n        We have to do this except for l,m == the classification test.\n        This is what we call the \"[j,k] pairs\".\n        \"\"\"\n        assert self.pre_and_post\n        pos = []\n        for dam_lev in self.damage_levels:\n            for clf in range(1,damage.num_classifications+1):\n                if clf==1:\n                    \"\"\" in the first classification there is no state, every damage is new \"\"\"\n                    rows_cols = damage.get_rows_cols(clf, dam_lev)\n                else:\n                    \"\"\" classifications 2, 3 ... n have a state \"\"\"\n                    rows_cols = damage.get_rows_cols(clf, dam_lev, 'New - damage')\n\n                print('level {}, classification {} : {} points with state New - damage'\\\n                    .format(dam_lev, clf, len(rows_cols)))\n\n                new_pos = []\n                for row, col in rows_cols:\n                    if self._is_patch_inside_image(row, col, damage):\n                        for l in range(0, clf): # 0... k-1\n                            
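# pair every pre-damage classification l with every classification m\n                            # at or after the new damage appears\n                            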
for m in range(clf, damage.num_classifications+1): # k...n\n                                s = Sample(dam_lev, row, col, self.patch_size, [l, m])\n                                new_pos.append(s)\n\n                print('give rise to {} positives train'.format(len(new_pos)))\n                pos.extend(new_pos)\n\n        return pos\n\n    \n    def _is_patch_inside_image(self, row, col, damage):\n        return (row >= self.half_size) and (row < damage.height - self.half_size -1) \\\n            and (col >= self.half_size) and (col < damage.width - self.half_size -1)\n    \n\n    def _make_negatives(self, damage, positives, all_pairs):\n        \"\"\" the positives are formed by all points in *some* classification \n        with some damage \"\"\"\n        points_pos = np.array([[s.row, s.col] for s in positives])\n        points_pos = np.unique(points_pos, axis=0)\n        print('{} positive samples, {} unique positive points'\\\n            .format(len(positives), len(points_pos)))\n        neg = []\n        num_pos_samples = len(positives)\n        num_neg_samples = int(num_pos_samples*self.negs_per_pos)\n        if all_pairs:\n            num_neg_points = num_neg_samples//(damage.num_classifications*(damage.num_classifications+1)//2)\n        else:\n            num_neg_points = num_neg_samples//damage.num_classifications\n        print('{} positive samples (not points), {} negative samples to make'\n              ' in {} classifications => {} negative points to sample'\\\n            .format(num_pos_samples, num_neg_samples, damage.num_classifications, \n                    num_neg_points))\n        \"\"\" because we will make a negative sample for each negative point\n        *and* classification \"\"\"\n        points_neg = self._make_points_negatives(num_neg_points, points_pos, damage)\n\n        if all_pairs:\n            for p in points_neg:\n                for idx1 in range(0, damage.num_classifications):\n                    for idx2 in range(idx1+1, damage.num_classifications+1):\n                        s = Sample('No Visible Damage', p[0], p[1], self.patch_size, [idx1, idx2])\n                        neg.append(s)\n        else:\n            for p in points_neg:\n                for clf in range(1, damage.num_classifications+1):\n                    \"\"\" a negative is negative for all the classifications because\n                    it does not overlap any positive in any classification \"\"\"\n                    if not self.pre_and_post:\n                        s = Sample('No Visible Damage', p[0], p[1], self.patch_size, [clf])\n                    else:\n                        s = Sample('No Visible Damage', p[0], p[1], self.patch_size, [0, clf])\n\n                    neg.append(s)\n        \n        print('{} negatives from {} classifications'.\\\n            format(len(neg), damage.num_classifications))\n        return neg\n\n\n    def _make_points_negatives(self, num_negatives, points_pos, damage):\n        points_neg = []\n        while len(points_neg) < num_negatives:\n            num_to_sample = num_negatives - len(points_neg)\n            pnew_negatives = self._sample_points_in_poly(damage.PopulatedPolyPx, num_to_sample)\n            # a negative cannot overlap a positive: if it does we swap it for\n            # another negative, and so on until nothing needs changing. The\n            # parameter min_distance_pos_neg is the minimum distance between\n            # positives and negatives, and it has to be patch_size or more.\n            num_overlaped = 0\n            to_remove = []\n            for neg in pnew_negatives:\n                if self._overlaps(neg, points_pos, self.min_distance_pos_neg):\n                    to_remove.append(neg)\n                    \"\"\" do not change new_negatives within the loop! 
\"\"\"\n num_overlaped += 1\n for p in to_remove:\n pnew_negatives.remove(p)\n #print('sampled {} new negatives from which {} with overlapping'\\\n # .format(num_to_sample, num_overlaped))\n points_neg += pnew_negatives\n \n return np.array(points_neg)\n\n\n def _overlaps(self, p, points, min_dist):\n array_points = np.array(points)\n if len(array_points)==0:\n return False\n else:\n return np.any(np.logical_and(\n np.abs(p[0]-array_points[:,0])=init_row, r<=last_row))\n assert np.all(np.logical_and(c>=init_col, c<=last_col))\n return points\n \n \n def _is_inside(self, poly, r,c):\n return poly.contains(Point([c,r]))\n \n\n def _check_no_overlaping_pos_neg(self, positives, negatives):\n \"\"\" check no positive overlaps a negative \"\"\"\n points_neg = np.array([[s.row, s.col] for s in negatives])\n for pos in positives :\n point_pos = [pos.row, pos.col]\n if self._overlaps(point_pos, points_neg, self.min_distance_pos_neg):\n print('\\n*** Error : the positive {} overlaps some negative'\n .format(point_pos))\n assert False\n\n\n \"\"\" saves current object to later get polygon and coordinates of \n positives and negatives etc. Referenced object Damage can no be saved\n by pickle \"\"\"\n def save(self):\n if self.damage_levels==['Destroyed']:\n damlev = 'D'\n elif self.damage_levels==['Severe Damage', 'Destroyed']:\n damlev = 'DS'\n elif self.damage_levels==['Moderate Damage', 'Severe Damage', 'Destroyed']:\n damlev = 'DSM'\n else:\n assert False\n\n names_train = ''\n for dam in self.damages_train:\n names_train += dam.site+'+'\n names_train = names_train[:-1] # remove last '+'\n names_test = ''\n for dam in self.damages_test:\n names_test += dam.site+'+'\n names_test = names_test[:-1]\n fname = 'split_n_sites_{}_{}_{}_{}_{}_{}.pkl'\\\n .format(names_train, names_test, damlev,\\\n 'preandpost' if self.pre_and_post else 'post', \\\n self.negs_per_pos, self.patch_size)\n fname = os.path.join('split_n_sites', fname)\n\n with open(fname, 'wb') as f:\n # set damages to None to be able to save\n for damage in self.damages_train + self.damages_test:\n damage.datasets = None\n print('*** train and test damage datasets set to None')\n pickle.dump(self, f)\n print('saved SplitNSites object to {}'.format(fname))\n for damage in self.damages_train + self.damages_test:\n damage.datasets = damage._load_datasets()\n print('*** train and test damage datasets restored')\n\n\n # static method to load a previously saved split \n def load(fname):\n with open(fname, 'rb') as f:\n split = pickle.load(f)\n for damage in split.damages_train + split.damages_test:\n damage.datasets = damage._load_datasets()\n return split\n \n\n def plot(self): \n for damage in self.damages_train + self.damages_test:\n for clf in range(1, damage.num_classifications+1):\n plt.figure() \n rows_cols = self._vertices_multipolygon(damage.PopulatedPolyPx)\n for r,c in rows_cols:\n plt.plot(c, r, 'r.-')\n \n rows_cols = self._vertices_multipolygon(damage.NoAnalysisPolyPx)\n for r,c in rows_cols:\n plt.plot(c, r, 'm.-')\n \n \"\"\" -1 index works for both pre_and_post True and False \"\"\"\n points_pos = np.array([[p.row, p.col] for p in self.positives \n if p.indices_datasets[-1]==clf]) \n points_neg = np.array([[p.row, p.col] for p in self.negatives \n if p.indices_datasets[-1]==clf]) \n plt.scatter(points_neg[:,1], points_neg[:,0], marker='_', \n color='orange', label='negatives')\n plt.scatter(points_pos[:,1], points_pos[:,0], marker='+', \n color='g', label='positives')\n plt.legend()\n plt.axis('equal')\n ylim = list(plt.ylim())\n 
ylim.reverse()\n                plt.ylim(ylim)\n                plt.title('{}, classification {}'.format(damage.site, clf))\n                plt.show(block=False)\n    \n    \n    def _vertices_multipolygon(self, multipolygon):\n        rows_cols = []\n        for pol in multipolygon.geoms:\n            coords = np.array(pol.exterior.coords)\n            pol_cols = coords[:,0]\n            pol_rows = coords[:,1]\n            rows_cols.append([pol_rows, pol_cols])\n        return rows_cols\n\n\n\ndef make_split(sites_train, sites_test):\n    damage_levels = ['Destroyed']\n    # damage_levels = ['Severe Damage', 'Destroyed']\n    # damage_levels = ['Moderate Damage', 'Severe Damage', 'Destroyed']\n    patch_size = 64\n    pre_and_post = True # False\n    min_distance_pos_neg = 2*patch_size\n    negs_per_pos = 20\n    dmg_levs = reduce(lambda a,b:a+b, [d[0] for d in damage_levels]) \n    # 'D', 'SD' or 'MSD'\n\n    print('Making split n sites for train sites {} and test sites {}, '\\\n          'damage levels {}, {}, {} negs per pos, patch size {}'\\\n          .format(sites_train, sites_test, dmg_levs, \\\n          'preandpost' if pre_and_post else 'post', \\\n          negs_per_pos, patch_size))\n    print(40*'-')\n    split = SplitNSites(sites_train, sites_test, damage_levels,\n                        negs_per_pos, pre_and_post, patch_size,\n                        min_distance_pos_neg)\n    print('\\nDone split_n_sites\\n')\n    return split\n\n\n\ndef show_samples(samples, images):\n    num_samples = len(samples)\n    plt.figure()\n    plt.subplot(2, num_samples, 1)\n    for i,s in enumerate(samples):\n        idx_pre = s.indices_datasets[0]\n        idx_post = s.indices_datasets[1]\n        crop1 = images[idx_pre][s.row-s.patch_size//2 : s.row+s.patch_size//2,\n                                s.col-s.patch_size//2 : s.col+s.patch_size//2]\n        crop2 = images[idx_post][s.row-s.patch_size//2 : s.row+s.patch_size//2,\n                                 s.col-s.patch_size//2 : s.col+s.patch_size//2]\n        plt.subplot(2, num_samples, i+1)\n        plt.imshow(crop1)\n        plt.axis('off')\n        plt.title('{}, \\npre {}, post {}'.format(s.level, idx_pre, idx_post), fontsize=8)\n        plt.subplot(2, num_samples, i+1+num_samples)\n        plt.imshow(crop2)\n        plt.axis('off')\n    \n    plt.show(block=False)\n    \n\nif __name__ == '__main__':\n    if True:\n        sites_train = ['homs',]\n        sites_test = ['aleppo',]\n        split = make_split(sites_train, sites_test)\n        split.save()\n    \n    if True:\n        fname = os.path.join('split_n_sites', 'split_n_sites_homs_aleppo_D_preandpost_20_64.pkl')\n        # 20 negs per pos sample\n        split = SplitNSites.load(fname)\n    \n    if True:\n        images = split.images()\n        num_samples = 10\n        idx = np.random.randint(low=0,high=len(split.positives_train), size=num_samples)\n        samples = np.array(split.positives_train)[idx]\n        show_samples(samples, images)\n        idx = np.random.randint(low=0,high=len(split.negatives_train), size=num_samples)\n        samples = np.array(split.negatives_train)[idx]\n        show_samples(samples, images)\n        idx = np.random.randint(low=0,high=len(split.positives_test), size=num_samples)\n        samples = np.array(split.positives_test)[idx]\n        show_samples(samples, images)\n        idx = np.random.randint(low=0,high=len(split.negatives_test), size=num_samples)\n        samples = np.array(split.negatives_test)[idx]\n        show_samples(samples, images)\n    \n\n    \n","sub_path":"split_n_sites.py","file_name":"split_n_sites.py","file_ext":"py","file_size_in_byte":22512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"549314348","text":"from __future__ import print_function #, unicode_literals\nfrom __future__ import absolute_import, division\ntry:\n    xrange = xrange\n    # We have Python 2\nexcept:\n    xrange = range\n    # We have Python 3\n\nimport numpy as np\n\ndef read_histogram(filename, index=[0], header=False):\n    \"\"\"\n    Reads histogram\n\n    
Parameters\n    ----------\n    filename: str\n        full name of the histogram file\n    index: list of int\n        column number used as index, default 0\n    header: bool\n        Does the file contain a header with column names? Default False.\n\n    Return\n    ------\n    hist: dict of columns\n        histogram, contains 'index' column\n    \"\"\"\n    import re\n\n    with open(filename, 'r') as f:\n\n        if header:\n            columns = re.findall('\\\\S+', f.readline())\n\n        hist_arr = []\n        for line in iter(f.readline, ''):\n            hist_arr.append(list(map(float, re.findall('\\\\S+', line))))\n\n    hist_arr = np.array(hist_arr)\n\n    if not header:\n        columns = ['index'] + [str(i) for i in range(1, hist_arr.shape[1])]\n\n    hist = {}\n    for i, col in enumerate(columns):\n        hist[col] = hist_arr[:, i]\n\n    return hist\n","sub_path":"s2lib/read_write/read_histogram.py","file_name":"read_histogram.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"178213769","text":"import imageio\nimport numpy as np\nimport torchvision.transforms as transforms\nimport matplotlib.pyplot as plt\n\nfrom torchvision.utils import save_image\n\nto_pil_image = transforms.ToPILImage()\n\ndef image_to_vid(images):\n    imgs = [np.array(to_pil_image(img)) for img in images]\n    imageio.mimsave('../outputs/generated_images.gif', imgs)\n    \ndef save_reconstructed_images(recon_images, epoch):\n    save_image(recon_images.cpu(), f\"../outputs/output{epoch}.jpg\")\n    \ndef save_loss_plot(train_loss, valid_loss):\n    # loss plots\n    plt.figure(figsize=(10, 7))\n    plt.plot(train_loss, color='orange', label='train loss')\n    plt.plot(valid_loss, color='red', label='validation loss')\n    plt.xlabel('Epochs')\n    plt.ylabel('Loss')\n    plt.legend()\n    plt.savefig('../outputs/loss.jpg')\n    plt.show()","sub_path":"week1/conVAE/src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"83736297","text":"import tkinter as tk\n#--- main window ---#\nwindow = tk.Tk() # create the main window\nwindow.geometry('640x480') # set the size to 640x480\nwindow.title('YouTube 極速下載器') # set the main window title\n#--- Frame ---#\ninput_fm = tk.Frame(window, bg='red', # create the Frame\n                    width=640, height=120)\ninput_fm.pack() # set the layout\n#--- start the main loop ---#\nwindow.mainloop() ","sub_path":"ML from Courses/FT700/ch08/8-1.py","file_name":"8-1.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"160909666","text":"import RPi.GPIO as GPIO # GPIO handling\nimport subprocess\nfrom time import sleep # time handling\nfrom IPython.display import clear_output\n\nGPIO.setmode(GPIO.BCM) # the chosen pin numbering\nGPIO.setup(16, GPIO.IN) # one input: the push button\n\n\ndef my_callback(channel):\n\tif GPIO.input(channel):\n\t\tsubprocess.call(\"twinkle --immediate --call sip:300@192.168.1.29\", shell=True)\n\n\nprint(\"You can also stop with CTRL+C \\n\")\nGPIO.add_event_detect(16, GPIO.BOTH, callback=my_callback)\n\nwhile True:\n\tsleep(30)\n\tclear_output()\n","sub_path":"interphone_origin.py","file_name":"interphone_origin.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"484021341","text":"#!/usr/bin/env python\n\nfrom operator import attrgetter\nfrom typing import List, NamedTuple\n\n\nclass Interval(NamedTuple):\n    'An interval is a product of a start and a finish-time.'\n    start: int\n    finish: int\n\n\n
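# Illustrative trace (mirrors the __main__ example below): sorted by finish,\n# the intervals (2,4), (1,5), (3,3), (4,6) become (3,3), (2,4), (1,5), (4,6);\n# greedily keeping the earliest finisher and dropping everything overlapping\n# it selects [(3,3), (4,6)].\n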
def schedule(intervals: List[Interval]) -> List[Interval]:\n    'Returns the greatest subset of non-overlapping intervals.'\n\n    i: List[Interval] = sorted(intervals, key=attrgetter('finish'))\n    o: List[Interval] = []\n\n    while i:\n        sel: Interval = i[0]\n\n        o.append(sel)\n        for tmp in list(i):\n            if sel.start <= tmp.start <= sel.finish or tmp.start <= sel.start <= tmp.finish:\n                i.remove(tmp)\n\n    return o\n\n\nif __name__ == '__main__':\n    intervals: List[Interval] = [\n        Interval(2, 4), Interval(1, 5), Interval(3, 3), Interval(4, 6)]\n\n    print(schedule(intervals))\n","sub_path":"02/interval-scheduling.py","file_name":"interval-scheduling.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"497232209","text":"# 160. Intersection of Two Linked Lists\n\n# Write a program to find the node at which the intersection of two singly linked lists begins.\n\n# Definition for singly-linked list.\n# class ListNode(object):\n#     def __init__(self, x):\n#         self.val = x\n#         self.next = None\n\nclass Solution(object):\n    def getIntersectionNode(self, headA, headB):\n        \"\"\"\n        :type headA, headB: ListNode\n        :rtype: ListNode\n        \"\"\"\n        lenA, nodeA = 0, headA\n        while nodeA:\n            lenA += 1\n            nodeA = nodeA.next\n        \n        lenB, nodeB = 0, headB\n        while nodeB:\n            lenB += 1\n            nodeB = nodeB.next\n        \n        nodeA, nodeB = headA, headB\n        if lenA <= lenB:\n            short, long = headA, headB\n        else:\n            short, long = headB, headA\n        for _ in range(abs(lenB-lenA)):\n            long = long.next\n        while short:\n            if short == long:\n                return short\n            short = short.next\n            long = long.next\n        return None\n","sub_path":"IntersectionofTwoLinkedLists.py","file_name":"IntersectionofTwoLinkedLists.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"174726803","text":"from collections import defaultdict\nfrom nltk.corpus import wordnet as wn, wordnet_ic\nfrom random import sample\nimport numpy as np\nimport os\nfrom wn_similarity import disamb_group as ngroup\n\nSERVER_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nDIRECTORY_ROOT = os.getcwd()\nbrown_ic = wordnet_ic.ic('ic-brown.dat')\n\ndef get_rel_wn_syn(syn, hyperDepth=1, hypoDepth=1, sisterDepth=1):\n    \"\"\"\n    Purpose: Get all related synsets\n    Input: WordNet synset (string)\n    Output: a list of related synsets (including sister terms (one level up to the\n            hypernym, then one level back down to its hyponyms), part holonyms,\n            member holonyms and part meronyms) (Data type: list)\n    \"\"\"\n    #syn = wn.synset(syn)\n    hypo = lambda s: s.hyponyms()\n    hyper = lambda s: s.hypernyms()\n    \n    # hypernyms\n    hypernyms = list(syn.closure(hyper, depth=hyperDepth))# get hypernyms (with the depth of 1 level)\n    \n    # hyponyms\n    hyponyms = list(syn.closure(hypo, depth=hypoDepth))# get hyponyms (with the depth of 1 level)\n    \n    # sister synsets (hyponyms of hypernyms)\n    sisters = []\n    for h in hypernyms:\n        for s in list(h.closure(hypo, depth=sisterDepth)):\n            sisters.append(s)\n    \n    part_holonyms = [ z.synset() for y in list(syn.closure(lambda syn: syn.part_holonyms())) for z in y.lemmas() ]\n    member_holonyms = [ z.synset() for y in list(syn.closure(lambda syn: syn.member_holonyms(), depth=1)) for z in y.lemmas() ]\n    part_meronyms = [ z.synset() for y in list(syn.closure(lambda syn: syn.part_meronyms(), depth=1)) for z in y.lemmas() ]\n    \n    related_terms = hypernyms + hyponyms + sisters + part_holonyms + member_holonyms + part_meronyms\n    \n    return related_terms\n\n\n\ndef trans_wikipages(wikipages, UPBOUND_disambGroup=50):\n    \n    wikipageGroup = wikipages if len(wikipages) < UPBOUND_disambGroup else sample(wikipages, UPBOUND_disambGroup)\n    tmpSenses_ = [ wn.synsets(wikipageGroup[i], pos=wn.NOUN) for i in range(len(wikipageGroup)) ]\n    wikiSyns = ngroup.disambGroup(wikipageGroup, tmpSenses_, pos=wn.NOUN)\n\n    # If disambiguation failed (wikiSyns is empty) and there are still un-disambiguated wikipages available (len(wikipages) != 0), keep sampling the next group of wikipages\n    while len(wikiSyns) == 0 and len(wikipages) != 0:\n        # remove the wikipages that failed to disambiguate from wikipages, then sample another group\n        wikipages = list(set(wikipages) - set(wikipageGroup))\n        wikipageGroup = wikipages if len(wikipages) < UPBOUND_disambGroup else sample(wikipages, UPBOUND_disambGroup)\n        tmpSenses_ = [ wn.synsets(wikipageGroup[i], pos=wn.NOUN) for i in range(len(wikipageGroup)) ]\n        wikiSyns = ngroup.disambGroup(wikipageGroup, tmpSenses_, pos=wn.NOUN)\n    \n    return wikiSyns\n\n\n\ndef wiki_wn_group_single_link(wiki_pgs, wn_terms):\n    \"\"\"\n    Purpose: Calculate and determine the max similarity between WordNet related words and Wiki sister pages\n    Input: [wiki_pgs]: wiki sister pages (list of synsets); [wn_terms]: WordNet related terms (list of synsets)\n    Output: max similarity (float)\n    \"\"\"\n    max_sim = -1\n    max_wiki_syn = 'None'\n    max_wn_syn = 'None'\n    \n    def sigmoid(x):\n        return 1/(1 + np.exp(-x))\n    \n    for wn_term in wn_terms:\n        for wiki_pg in wiki_pgs:\n            pair_sim = wn_term.res_similarity(wiki_pg, brown_ic)\n            if pair_sim > max_sim:\n                max_sim = pair_sim\n                max_wiki_syn = wiki_pg\n                max_wn_syn = wn_term\n    \n    try:\n        max_wiki_syn = max_wiki_syn.name()\n        max_wn_syn = max_wn_syn.name()\n        max_sim = str(max_sim)\n    except AttributeError:\n        max_sim = str(max_sim)\n        pass\n    \n    return max_wiki_syn, max_wn_syn, max_sim\n    \n","sub_path":"wn_similarity/wnSim_align.py","file_name":"wnSim_align.py","file_ext":"py","file_size_in_byte":3773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"358547989","text":"from datetime import datetime\nfrom typing import Callable\n\n\ndef verify(module):\n    func_name = \"make_headers\"\n    assert hasattr(\n        module, func_name\n    ), f\"module {module.__name__} has no attribute {func_name}\"\n\n    make_headers = getattr(module, func_name)\n    assert isinstance(\n        make_headers, Callable\n    ), f\"entity {module.__name__}.{func_name} is not a function\"\n\n    headers_dict = {\n        \"Connection\": \"keep-alive\",\n        \"Content-Length\": 4053,\n        \"Date\": datetime(year=2020, month=1, day=1, hour=1, minute=1, second=1),\n    }\n\n    headers_got = make_headers(headers_dict)\n\n    headers_expected = \"\\n\".join(\n        (\n            \"Connection: keep-alive\",\n            \"Content-Length: 4053\",\n            \"Date: Wed, 1 Jan 2020 01:01:01 GMT\",\n        )\n    )\n\n    assert headers_got == headers_expected\n\n\ndef test(modules_level03):\n    for module in modules_level03.values():\n        verify(module)\n","sub_path":"lessons/lesson13/tests/test_level03.py","file_name":"test_level03.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"492622286","text":"import requests\n\n__all__ = ['Client']\n\n\nclass Client:\n    @staticmethod\n    def send(data: dict):\n        url = \"https://calant.azurewebsites.net/api/leadin/index.php\"\n        params = \\\n            {\n                'name': '1',\n                'phone': data['phone'],\n                'tax': '1'\n            }\n        requests.post(url=url, json=params)
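\n\n\n# Hypothetical usage sketch (the phone value is a placeholder):\n#   Client.send({'phone': '+10000000000'})\n# The call is fire-and-forget: the HTTP response is not checked.\n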
","sub_path":"app/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"630570772","text":"from __future__ import absolute_import, division, print_function\n\nimport os\nimport warnings\n\nfrom qtpy import QtCore, QtGui, QtWidgets\nfrom qtpy.QtCore import Qt\n\nfrom glue.external import six\nfrom glue.core.callback_property import add_callback\nfrom glue.viewers.common.qt.tool import CheckableTool\nfrom glue.icons.qt import get_icon\n\n__all__ = ['BasicToolbar']\n\n\nclass BasicToolbar(QtWidgets.QToolBar):\n\n    tool_activated = QtCore.Signal()\n    tool_deactivated = QtCore.Signal()\n\n    def __init__(self, parent):\n        \"\"\"\n        Create a new toolbar object\n        \"\"\"\n\n        super(BasicToolbar, self).__init__(parent=parent)\n\n        self.actions = {}\n        self.tools = {}\n        self.setIconSize(QtCore.QSize(25, 25))\n        self.layout().setSpacing(1)\n        self.setFocusPolicy(Qt.StrongFocus)\n        self._active_tool = None\n        self.setup_default_modes()\n\n    def setup_default_modes(self):\n        pass\n\n    @property\n    def active_tool(self):\n        return self._active_tool\n\n    @active_tool.setter\n    def active_tool(self, new_tool):\n\n        old_tool = self._active_tool\n\n        # If the tool is as before, we don't need to do anything\n        if old_tool is new_tool:\n            return\n\n        # Otherwise, if the tool changes, then we need to disable the previous\n        # tool...\n        if old_tool is not None:\n            self.deactivate_tool(old_tool)\n            if isinstance(old_tool, CheckableTool):\n                button = self.actions[old_tool.tool_id]\n                if button.isChecked():\n                    button.blockSignals(True)\n                    button.setChecked(False)\n                    button.blockSignals(False)\n\n        # ... and enable the new one\n        if new_tool is not None:\n            self.activate_tool(new_tool)\n            if isinstance(new_tool, CheckableTool):\n                button = self.actions[new_tool.tool_id]\n                if not button.isChecked():\n                    button.blockSignals(True)\n                    button.setChecked(True)\n                    button.blockSignals(False)\n\n        if isinstance(new_tool, CheckableTool):\n            self._active_tool = new_tool\n            self.parent().set_status(new_tool.status_tip)\n            self.tool_activated.emit()\n        else:\n            self._active_tool = None\n            self.parent().set_status('')\n            self.tool_deactivated.emit()\n\n    def activate_tool(self, tool):\n        tool.activate()\n\n    def deactivate_tool(self, tool):\n        if isinstance(tool, CheckableTool):\n            tool.deactivate()\n\n    def add_tool(self, tool):\n\n        parent = QtWidgets.QToolBar.parent(self)\n\n        if isinstance(tool.icon, six.string_types):\n            if os.path.exists(tool.icon):\n                icon = QtGui.QIcon(tool.icon)\n            else:\n                icon = get_icon(tool.icon)\n        else:\n            icon = tool.icon\n\n        action = QtWidgets.QAction(icon, tool.action_text, parent)\n\n        def toggle(checked):\n            if checked:\n                self.active_tool = tool\n            else:\n                self.active_tool = None\n\n        def trigger(checked):\n            self.active_tool = tool\n\n        parent.addAction(action)\n\n        if isinstance(tool, CheckableTool):\n            action.toggled.connect(toggle)\n        else:\n            action.triggered.connect(trigger)\n\n        shortcut = None\n\n        if tool.shortcut is not None:\n\n            # Make sure that the keyboard shortcut is unique\n            for m in self.tools.values():\n                if tool.shortcut == m.shortcut:\n                    warnings.warn(\"Tools '{0}' and '{1}' have the same shortcut \"\n                                  \"('{2}'). 
Ignoring shortcut for \"\n \"'{1}'\".format(m.tool_id, tool.tool_id, tool.shortcut))\n break\n else:\n shortcut = tool.shortcut\n action.setShortcut(tool.shortcut)\n action.setShortcutContext(Qt.WidgetShortcut)\n\n if shortcut is None:\n action.setToolTip(tool.tool_tip)\n else:\n action.setToolTip(tool.tool_tip + \" [shortcut: {0}]\".format(shortcut))\n\n action.setCheckable(isinstance(tool, CheckableTool))\n self.actions[tool.tool_id] = action\n\n menu_actions = tool.menu_actions()\n if len(menu_actions) > 0:\n menu = QtWidgets.QMenu(self)\n for ma in tool.menu_actions():\n ma.setParent(self)\n menu.addAction(ma)\n action.setMenu(menu)\n menu.triggered.connect(trigger)\n\n self.addAction(action)\n\n # Bind tool visibility to tool.enabled\n def toggle(state):\n action.setVisible(state)\n action.setEnabled(state)\n add_callback(tool, 'enabled', toggle)\n\n self.tools[tool.tool_id] = tool\n\n return action\n","sub_path":"glue/viewers/common/qt/toolbar.py","file_name":"toolbar.py","file_ext":"py","file_size_in_byte":4898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"600782538","text":"from cs50 import get_string\nimport re\n\n# Ideas: Use regex to validate user input (correct length, only digits?)\n# Criteria for valid card number:\n# - Only includes numbers\n# - Length is 13, 15, or 16\n# - Proper checksum\n# Easier to work with strings than numbers in this scenario (perform less arithmetic)\n\n\ndef main():\n\n user_input = get_string(\"Number: \")\n\n # Check for correct length and only diigts\n if (not(possible_card(user_input))):\n print(\"INVALID\")\n return 1\n\n # Check if the check sums are valid\n if (not(valid_check_sum(user_input))):\n print(\"INVALID\")\n return 2\n\n # Print the particular type of card\n print_card_type(user_input)\n\n return 0\n\n\n# valid : String -> Boolean\n# Is the provided user_input a possible card number?\n\n\ndef possible_card(user_input):\n match = re.fullmatch('[0-9]{13}|[0-9]{15}|[0-9]{16}', user_input)\n if match:\n return True\n return False\n\n\n# valid_check_sum : String -> Boolean\n# Do the digits in _card_number_ form a valid checksum?\n\n\ndef valid_check_sum(card_number):\n check_sum = first_sum(card_number) + second_sum(card_number)\n valid = check_sum % 10 == 0\n return valid\n\n\n# first_sum : String -> Number\n# Determines the sum of every other digit in _card_number_ times 2, starting at the second to rightmost\n\n\ndef first_sum(card_number):\n\n # Accumulator\n total = 0\n card_length = len(card_number)\n\n for i in range(1, card_length, 2):\n index = (card_length - 1) - i\n total += sum_digits(int(card_number[index]) * 2)\n\n return total\n\n\n# sum_digits : Number -> Number\n# Determines the sum of all digits in _num_\n\n\ndef sum_digits(num):\n\n # Accumulator\n total = 0\n digits = str(num)\n\n for i in range(0, len(digits)):\n total += int(digits[i])\n\n return total\n\n# second_sum : String -> Number\n# Determines the sum of every other digit in _card_number_, starting at the rightmost digit\n\n\ndef second_sum(card_number):\n\n # Accumulator\n total = 0\n card_length = len(card_number)\n\n for i in range(0, card_length, 2):\n index = (card_length - 1) - i\n total += int(card_number[index])\n\n return total\n\n\n# print_card_type : String -> _\n# Prints the type of card that _card_number_ represents, or \"invalid\"\n# N.B. 
Card number consists of only digits\n\n\ndef print_card_type(card_number):\n    # Use regex to determine if digits are in correct positions\n\n    # AMEX: Contains 15 digits; first 2 are 34 or 37\n    amex = re.fullmatch(\"34\\d{13}|37\\d{13}\", card_number)\n    mastercard = re.fullmatch(\"51\\d{14}|52\\d{14}|53\\d{14}|54\\d{14}|55\\d{14}\", card_number)\n    visa = re.fullmatch(\"4\\d{12}|4\\d{15}\", card_number)\n\n    if amex:\n        print(\"AMEX\")\n    elif mastercard:\n        print(\"MASTERCARD\")\n    elif visa:\n        print(\"VISA\")\n    else:\n        print(\"INVALID\")\n\n\nmain()
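\n\n# Worked checksum example (a commonly used VISA test number, not a real card):\n# for 4003600000000014, first_sum doubles every second digit from the right\n# (1,0,0,0,0,6,0,4 -> 2,0,0,0,0,12,0,8) and sums their digits: 2+3+8 = 13;\n# second_sum adds the remaining digits: 4+0+0+0+0+0+3+0 = 7. 13+7 = 20 and\n# 20 % 10 == 0, so the number passes the checksum and prints VISA.\n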
","sub_path":"pset6/credit/credit.py","file_name":"credit.py","file_ext":"py","file_size_in_byte":2852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"68223162","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Feb 28 08:43:52 2021\r\n\r\n@author: Angelina Kiman\r\n\"\"\"\r\n\r\n# import\r\nimport json\r\nimport csv\r\nimport os\r\nimport copy\r\n\r\n# read file\r\ncsvfile = open('C:\\\\Users\\\\angel\\\\OneDrive\\\\Documents\\\\FairFace\\\\fairface_label_val.csv', 'r')\r\n\r\n# write file\r\njsonfile = open('C:\\\\Users\\\\angel\\\\OneDrive\\\\Documents\\\\FairFace\\\\fairface_label_val.manifest', 'w')\r\n\r\n# s3 bucket path\r\ns3bucket = 's3://fairface-dataset/'\r\n\r\n# loop over rows\r\ncsv_reader = csv.reader(csvfile, delimiter=',')\r\nline_count = 0\r\nmax_lines = 10954\r\njsons = {}\r\nfor row in csv_reader:\r\n    if line_count == 0:\r\n        line_count += 1\r\n    else:\r\n        filename = row[0]\r\n        \r\n        # meta json\r\n        metat = {}\r\n        metat['confidence'] = 1\r\n        metat['human-annotated'] = \"yes\"\r\n        metat['creation-date'] = \"2020-03-06T17:46:39.176\"\r\n        metat['type'] = \"groundtruth/image-classification\"\r\n        metarace = copy.deepcopy(metat)\r\n        metarace['class-name'] = row[3]\r\n        metagender = copy.deepcopy(metat)\r\n        metagender['class-name'] = row[2]\r\n        \r\n        # final json item\r\n        jsonitem = {}\r\n        jsonitem['source-ref'] = os.path.join(s3bucket, filename)\r\n        jsonitem[\"gender\"] = 0\r\n        jsonitem[\"gender-metadata\"] = metagender\r\n        \r\n        jsonitem[\"race\"] = 1\r\n        jsonitem[\"race-metadata\"] = metarace\r\n        line_count += 1\r\n        \r\n        # write line\r\n        json.dump(jsonitem, jsonfile)\r\n        jsonfile.write(\"\\n\")\r\n        \r\n        if line_count > max_lines:\r\n            break\r\nprint(f'Processed {line_count} lines.')\r\n\r\ncsvfile.close()\r\njsonfile.close()\r\n\r\n#{\r\n#    \"source-ref\": \"s3://bucket/images/sunrise.png\",\r\n#    \"testdataset-classification_Sunrise\": 1,\r\n#    \"testdataset-classification_Sunrise-metadata\": {\r\n#        \"confidence\": 1,\r\n#        \"job-name\": \"labeling-job/testdataset-classification_Sunrise\",\r\n#        \"class-name\": \"Sunrise\",\r\n#        \"human-annotated\": \"yes\",\r\n#        \"creation-date\": \"2020-03-06T17:46:39.176\",\r\n#        \"type\": \"groundtruth/image-classification\"\r\n#    }\r\n#}","sub_path":"csv2jsonline_test.py","file_name":"csv2jsonline_test.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"181635229","text":"##########################################################################\n# MediPy - Copyright (C) Universite de Strasbourg, 2011 \n# Distributed under the terms of the CeCILL-B license, as published by \n# the CEA-CNRS-INRIA. Refer to the LICENSE file or to \n# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html \n# for details. \n##########################################################################\n\nimport logging\nimport xml.dom.minidom\n\nimport wx.xrc\n\nclass UI(object):\n    \"\"\" Simple object holding wx controls\n    \"\"\"\n    \n    def from_window(self, window, names):\n        \"\"\" Set members from controls taken from wx window\n        \"\"\"\n        \n        for name in names :\n            if not hasattr(self, name) :\n                logging.warning(\"%s has no attribute \\\"%s\\\"\", \n                                type(self), name)\n            value = wx.xrc.XRCCTRL(window, name)\n            if value is None :\n                logging.warning(\"%s has no control \\\"%s\\\"\", \n                                type(window), name)\n            else :\n                setattr(self, name, value)\n\ndef load_xrc(xrc_file, handlers, class_name, parent, window_name):\n    \"\"\" Load an XRC file with given handlers, and load a window from that file\n        Return the xml document and the window\n    \"\"\"\n    \n    xml_document = xml.dom.minidom.parse(xrc_file)\n    resource = wx.xrc.EmptyXmlResource()\n    for handler in handlers :\n        resource.InsertHandler(handler)\n    # Don't use LoadFromString so that the internal wxFileSystem is set to\n    # the directory holding xrc_file\n    resource.Load(xrc_file)\n    \n    # Build frame\n    loader = getattr(resource, \"Load%s\"%class_name)\n    if class_name in [\"Bitmap\", \"Icon\", \"Menu\"] :\n        window = loader(window_name)\n    else :\n        window = loader(parent, window_name)\n    \n    return (xml_document, window) \n","sub_path":"lib/medipy/gui/base/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"487213570","text":"def fibo(n):\r\n    if (n<=1):\r\n        return n\r\n    else:\r\n        n1=fibo(n-1)\r\n        n2=fibo(n-2)\r\n        n=n1+n2\r\n        return n\r\nn=int(input(\"enter the number of terms:\"))\r\nfor i in range(n):\r\n    print (fibo(i))\r\n","sub_path":"fibo using recu.py","file_name":"fibo using recu.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"509012606","text":"import os\nimport resource\nfrom uuid import uuid4\n\n# defaults taken from the ActiveState daemonization recipe cited in daemonize()\nMAXFD = 1024\nREDIRECT_TO = getattr(os, 'devnull', '/dev/null')\n\n\ndef gen_unique_id():\n    return str(uuid4())\n\n\ndef isinteger(val):\n    try:\n        return int(val)\n    except:\n        return False\n\n    \ndef pathbits(path):\n    if path.endswith('/'):\n        path = path[:-1]\n    if path.startswith('/'):\n        path = path[1:]\n    if path:\n        return path.split('/')\n    else:\n        return []\n    \n    \ndef get_maxfd():\n    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]\n    if (maxfd == resource.RLIM_INFINITY):\n        maxfd = MAXFD\n    return maxfd\n\n\ndef daemonize():\n    \"\"\"\\\n    Standard daemonization of a process. 
Code is based on the\n ActiveState recipe at:\n http://code.activestate.com/recipes/278731/\n \"\"\"\n if not 'UNUK_FD' in os.environ:\n if os.fork() == 0: \n os.setsid()\n if os.fork() != 0:\n os.umask(0) \n else:\n os._exit(0)\n else:\n os._exit(0)\n \n maxfd = get_maxfd()\n\n # Iterate through and close all file descriptors.\n for fd in range(0, maxfd):\n try:\n os.close(fd)\n except OSError: # ERROR, fd wasn't open to begin with (ignored)\n pass\n \n os.open(REDIRECT_TO, os.O_RDWR)\n os.dup2(0, 1)\n os.dup2(0, 2)\n","sub_path":"src/unuk/utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"279167480","text":"N=int(input())\nws=input().split()\nBASE=\"zrbcdwtjfqlvsxpmhkng\"\nress=[]\nfor w in ws:\n w=w.lower()\n res=\"\"\n for c in w:\n idx = BASE.find(c)\n if idx>=0:res+=str(idx//2)\n if len(res)>0:\n ress.append(res)\nprint(\" \".join(ress))\n","sub_path":"atcoder/arc/010/arc011b.py","file_name":"arc011b.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"319166153","text":"import logging\nimport numpy as np\nimport random\nimport copy\nimport chess\n\nfrom log import setupLogging\nfrom GameState import Turn, Castle, GameState, Move\nfrom BitBoard import BitBoard, PieceType, Occupier, BitBoardsFromFenString, I2S\nfrom Supervised import SupervisedChess\n\n\nDEFAULT_POLICY_WEIGHT = 1e-2\nEXPLORE_CONSTANT = 1e-1\n\nTERMINATION_CHANCE = 1e-3\n\n# Matches each game state (hashable, unaffected by training) to a node in our game tree\nSTATE_TO_NODE_DICT = {}\n\n\nclass Edge(object):\n def __init__(self, parent, move):\n self.parent = parent # the parent node game state\n self.move = move # descriptor of the chess move to take\n self.target = None # the game state this move would lead to\n self.targetState = None\n self.boardState = None\n\n\nclass Node(object):\n\n # Initialization\n\n def __init__(self, gameState, board, model=None):\n self.gameState = gameState\n self.qReward = 0.0\n self.model = model\n self.numVisits = 0\n self.isTerminal = True\n self.edges = []\n # if we've trained a policy network to start from, its weights go here\n self.policyWeights = []\n self.board = board\n self.initEdges()\n\n def initEdges(self):\n \"\"\"\n For the game state, figures out the possible moves, and creates edges from it\n Also creates empty nodes for the next level down\n \"\"\"\n moveList = self.gameState.getPossibleMoves(self.board)\n\n if(self.model != None):\n movListWeights = self.model.getMovePreferenceList(\n self.gameState, moveList)\n print(movListWeights)\n\n self.edges = [Edge(self, x) for x in moveList]\n\n for i, edge in enumerate(self.edges):\n resultGameState = moveList[i].apply(self.gameState)\n edge.targetState = resultGameState\n edge.target = None # otherwise, will recursively create every possible game state from here\n\n # additions to MCTS tp save python-chess type board-state\n final_move = I2S(moveList[i].startLoc) + I2S(moveList[i].endLoc)\n final_move = chess.Move.from_uci(final_move)\n self.board.push(final_move)\n edge.boardState = copy.deepcopy(self.board)\n self.board.pop()\n\n self.policyWeights = [DEFAULT_POLICY_WEIGHT for x in self.edges]\n\n def hasEdgeTargets(self):\n if len(self.edges) == 0:\n return True\n if (self.edges[0].target == None):\n return False\n return True\n\n def fillInEdgeTargets(self):\n for edge in self.edges:\n 
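# reuse an existing node when the state was reached before (a transposition),\n            # so visit counts and rewards are shared across move orders\n            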
if edge.targetState not in STATE_TO_NODE_DICT:\n                STATE_TO_NODE_DICT[edge.targetState] = Node(\n                    edge.targetState, edge.boardState, self.model)\n            edge.target = STATE_TO_NODE_DICT[edge.targetState]\n\n    # Accessors\n\n    def getNumVisits(self):\n        return self.numVisits\n\n    def getQReward(self):\n        return self.qReward\n\n    # Calculations\n\n    def getExploitationTerm(self):\n        \"\"\"\n        Q(v)/N(v)\n        \"\"\"\n        if self.numVisits == 0:\n            return 0\n        else:\n            return self.qReward / self.numVisits\n\n    def getExplorationTerm(self, parent):\n        \"\"\"\n        sqrt(N(v)/(1 + N(vi)))\n        \"\"\"\n        return np.sqrt(parent.getNumVisits() / (1.0 + self.getNumVisits()))\n\n    # Traversal\n\n    def getEdgeToTraverse(self):\n        \"\"\"\n        Returns index of edge to go down\n        \"\"\"\n\n        uctArray = np.zeros((len(self.edges)))\n\n        for i in range(len(self.edges)):\n            edge = self.edges[i]\n            nextNode = edge.target\n            if nextNode.gameState.turn == Turn.WHITE: # flips reward valuation based on whose turn it is\n                exploitTerm = nextNode.getExploitationTerm()\n            else:\n                exploitTerm = -nextNode.getExploitationTerm()\n            uct = exploitTerm + EXPLORE_CONSTANT * \\\n                self.policyWeights[i] * nextNode.getExplorationTerm(self)\n            uctArray[i] = uct\n\n        return np.argmax(uctArray)\n\n    def evaluateSubtree(self):\n        \"\"\"\n        Traverses the game tree starting at this node\n        \"\"\"\n        if random.random() <= TERMINATION_CHANCE:\n            return 0.0 # pretend we didn't visit this node, call it a draw\n\n        self.numVisits += 1\n        if self.gameState.isCheckmate():\n            self.isTerminal = True\n            if self.gameState.turn == Turn.WHITE:\n                self.qReward += 1.0\n            else:\n                self.qReward += -1.0\n        else:\n            if not self.hasEdgeTargets():\n                self.fillInEdgeTargets() # lazily expanding these nodes\n\n            edgeToVisit = self.getEdgeToTraverse()\n\n            # RECURSIVE\n            childReward = self.edges[edgeToVisit].target.evaluateSubtree()\n            self.updateWithNewChildReward(edgeToVisit, childReward) # backprop\n\n        return self.qReward\n\n    def updateWithNewChildReward(self, childIndex, childReward):\n        \"\"\"\n        Updates our reward based on what a child sub-tree came back with\n        \"\"\"\n        self.qReward += childReward\n\n\n# FEN string for chess board state where Nb5 checkmates\nexampleWhiteCheckmate = \"k7/8/1R6/8/3N4/8/8/7K w\"\n\n\ndef main():\n    random.seed(0xbadbad)\n    logging.info(\"Running main function in mcts\")\n\n    exampleBoards = BitBoardsFromFenString(exampleWhiteCheckmate)\n\n    logging.info(exampleBoards)\n    for board in exampleBoards:\n        logging.info(board)\n\n\nif __name__ == \"__main__\":\n    setupLogging()\n    main()\n
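\n# Toy UCT numbers (illustrative only): a child with Q=3.0 over N=10 visits under\n# a parent with 100 visits scores exploit 3.0/10 = 0.3 and exploration\n# sqrt(100/11) ~= 3.0, which EXPLORE_CONSTANT (0.1) and the policy weight scale\n# down before the argmax in getEdgeToTraverse picks the best edge.\n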
+{"seq_id":"458874933","text":"from django.shortcuts import get_object_or_404, render\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\nfrom .models import News, GedTesting, ForEducators, NewLinks, PopLinks, Carousel\n\ndef index(request):\n    carousel_images = Carousel.objects.all()\n    latest_post = News.objects.latest()\n    newlinks = NewLinks.objects.all()\n    poplinks = PopLinks.objects.all()\n    return render(request, 'content/index.html', {'carousel_images' : carousel_images, 'latest_post' : latest_post, 'newlinks' : newlinks, 'poplinks' : poplinks})\n\ndef news(request):\n    all_posts = News.objects.all()\n    paginator = Paginator(all_posts, 3) # Show 3 posts per page\n\n    page = request.GET.get('page')\n    try:\n        all_posts = paginator.page(page)\n    except PageNotAnInteger:\n        # If page is not an integer, deliver the first page.\n        all_posts = paginator.page(1)\n    except EmptyPage:\n        # If page is out of range (e.g. 9999), deliver the last page of results.\n        all_posts = paginator.page(paginator.num_pages)\n\n    return render(request, 'content/news.html', {'all_posts' : all_posts})\n\ndef ged_testing(request):\n    pv_resources = GedTesting.objects.filter(resource_category='pv_resource')\n    pv_links = GedTesting.objects.filter(resource_category='pv_link')\n    ged_resources = GedTesting.objects.filter(resource_category='ged_resource')\n    ged_links = GedTesting.objects.filter(resource_category='ged_link')\n    return render(request, 'content/ged-testing.html', {'pv_resources' : pv_resources, 'pv_links': pv_links, 'ged_resources': ged_resources, 'ged_links': ged_links})\n\ndef for_educators(request):\n    policies = ForEducators.objects.filter(resource_category='policy')\n    manuals = ForEducators.objects.filter(resource_category='manual')\n    mathematics = ForEducators.objects.filter(resource_category='mathematics')\n    reading_language_arts = ForEducators.objects.filter(resource_category='reading_language_arts')\n    social_studies = ForEducators.objects.filter(resource_category='social_studies')\n    science = ForEducators.objects.filter(resource_category='science')\n    digital_literacy = ForEducators.objects.filter(resource_category='digital_literacy')\n    pre_release = ForEducators.objects.filter(resource_category='pre_release')\n    return render(request, 'content/for-educators.html', {'policies' : policies, 'manuals' : manuals, 'mathematics' : mathematics, 'reading_language_arts': reading_language_arts, 'social_studies': social_studies, 'science': science, 'digital_literacy': digital_literacy, 'pre_release': pre_release })\n","sub_path":"content/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
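One detail worth noting in the views record above: News.objects.latest() is called with no arguments, which only works when the model names a default field to order by. A hypothetical Meta showing that requirement; the pub_date field name is assumed, not taken from the project:

from django.db import models

class News(models.Model):
    title = models.CharField(max_length=200)
    pub_date = models.DateTimeField(auto_now_add=True)  # assumed field name

    class Meta:
        # Lets News.objects.latest() be called with no arguments,
        # as the index view above does.
        get_latest_by = 'pub_date'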
+{"seq_id":"571449173","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreate Time: 2019/12/2 8:17 PM\nAuthor: ybx\n\"\"\"\n\nimport os\nimport pydicom\nimport shutil\n\n\ndcm_path = \"/media/tx-eva-data/yushiyan/基础数据库/中国医科大学附属第一医院/tmp\"\nout_path = \"/media/tx-eva-data/yushiyan/原始数据库/中国医科大学附属第一医院\"\n\n\nfor dirpath, dirname, filenames in os.walk(dcm_path):\n    for file in filenames:\n        dicom_path = os.path.join(dirpath, file)\n        info = pydicom.read_file(dicom_path, force=True)\n        hosp_name = dicom_path.split('/')[5]\n        # Strip the fixed prefix and suffix from the patient folder name to get the ID.\n        pid = dicom_path.split('/')[-2][9:-5]\n        studyInstanceUID = info.StudyInstanceUID\n        seriesInstanceUID = info.SeriesInstanceUID\n        sopInstanceUID = info.SOPInstanceUID\n        new_name = os.path.join(pid, studyInstanceUID, seriesInstanceUID)\n        save_path = os.path.join(out_path, new_name)\n        if not os.path.exists(save_path):\n            os.makedirs(save_path)\n        info.save_as(os.path.join(save_path, sopInstanceUID + '.dcm'))\n","sub_path":"com/infervision/code_0917/change_dicom_name.py","file_name":"change_dicom_name.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"468786375","text":"__author__ = 'sunjoo'\n\nimport sys,os,fnmatch\nimport pymongo\nimport pymysql\nimport time,threading\nimport base64,zlib\n\nlock = threading.Lock()\nfileLocations = {}\n\n#database_host='156.147.61.76'\ndatabase_host='127.0.0.1'\ndatabase_name='cerberusLog'\ndatabase_user='admin'\ndatabase_password='soyulpark'\n#rootDir = '/Users/sunjoo/temp/job'\nrootDir = '/vol/users/allessunjoo.park/jobs'\n\nlist_of_tables = []\nlist_of_jobs = []\n\nclass DisplayTimeElapsed(object):\n    def __init__(self, interval=1, inputData=\"\"):\n        self.interval = interval\n        thread = threading.Thread(target=self.run, args=(inputData,))\n        thread.daemon = True\n        thread.start()  # Start the execution\n    def run(self, argv):\n        i = 0\n        while True:\n            i += 1\n            print(argv + \"=\" + str(i))\n            time.sleep(self.interval)\n\nclass DisplayFiles(object):\n    def __init__(self, job_name=\"\"):\n        thread = threading.Thread(target=self.run, args=(job_name,))\n        thread.daemon = False\n        thread.start()\n    def run(self, job_name):\n        file_location = \"\"\n        log_files = []\n        with lock:\n            log_files = fileLocations[job_name]\n        print(job_name)\n        try:\n            while True:\n                connection = pymysql.connect(host=database_host,\n                                             user=database_user,\n                                             password=database_password,\n                                             db=database_name,\n                                             charset='utf8',\n                                             connect_timeout=20,\n                                             cursorclass=pymysql.cursors.DictCursor)\n                try:\n                    file_location = log_files.pop()\n                except IndexError:\n                    # No more build log files for this job; stop the worker cleanly.\n                    connection.close()\n                    return\n                print(file_location)\n                f = open(file_location, 'r')\n                file_location_split = file_location.split('/')\n                job_name = file_location_split[len(file_location_split)-4].split('-')\n                buildNumber = file_location_split[len(file_location_split)-2]\n                branchName = job_name[1]\n                machineName = job_name[3]\n                input_str = '-'.join(job_name) + \":\" + buildNumber\n                jobname_str = '_'.join(job_name)\n                main_cursor = connection.cursor()\n                if jobname_str not in list_of_tables:\n                    sql_create_table = 'CREATE TABLE ' + jobname_str \\\n                                       + ' (`buildNumber` INT, `machineName` CHAR(30), `branchName` CHAR(30), `log` LONGTEXT) '\n                    result_create_table = main_cursor.execute(sql_create_table)\n                    list_of_tables.append(jobname_str)\n                sql_find_log = 'SELECT * FROM ' + jobname_str + ' WHERE buildNumber = \\\"' + buildNumber + '\\\"'\n                find_cursor = connection.cursor()\n                result_find_log = find_cursor.execute(sql_find_log)\n                already = []\n                log_raw = ''\n                if result_find_log != 0:\n                    # Undo the base64 + zlib encoding used when the log was stored.\n                    log_raw = zlib.decompress(base64.b64decode(find_cursor.fetchone()['log'])).decode('utf8')\n                    if \"Finished: \" in log_raw:\n                        already.append(jobname_str + \":\" + str(buildNumber))\n                else:\n                    print(input_str)\n                    log_data = ''\n                    for line in f:\n                        log_data += line\n                    sql_remove_log = 'DELETE FROM ' + jobname_str + ' WHERE `buildNumber` = ' + buildNumber\n                    result_remove_log = main_cursor.execute(sql_remove_log)\n                    try:\n                        # zlib-compress then base64-encode so the blob is safe to store as text.\n                        log_data_b64 = base64.b64encode(zlib.compress(log_data.encode('utf8')))\n                        sql_insert_log = 'INSERT INTO ' + jobname_str + ' ( buildNumber, machineName, branchName, log)' \\\n                                         + ' VALUES ( %s, \\'%s\\', \\'%s\\', \\'%s\\')' % (buildNumber, machineName, branchName, log_data_b64)\n                        try:\n                            result_insert_log = main_cursor.execute(sql_insert_log)\n                        except Exception:\n                            # Retry once on a fresh connection.\n                            connection.close()\n                            new_connection = pymysql.connect(host=database_host,\n                                                             user=database_user,\n                                                             password=database_password,\n                                                             db=database_name,\n                                                             charset='utf8',\n                                                             connect_timeout=20,\n                                                             cursorclass=pymysql.cursors.DictCursor)\n                            new_cursor = new_connection.cursor()\n                            result_insert_log = new_cursor.execute(sql_insert_log)\n                            new_connection.commit()\n                            new_connection.close()\n                    except pymysql.err.ProgrammingError as err:\n                        print(\"Error: insert error : \" + jobname_str + \":\" + buildNumber)\n                        print(err)\n                connection.commit()\n                connection.close()\n        except pymysql.err.InternalError as err:\n            print(err)\n            print(\"Please check a database : \" + database_name)\n\"\"\"\nfor line in f:\n    i+=1\n    line=line.replace('\\\\n','')\n    input_coll.insert({\"jobName\":job_name,\n                       \"buildNumber\":buildNumber,\n                       \"logLine\":i,\"logData\":line})\n\"\"\"\ntopDir = '/vol/users/allessunjoo.park/jobs'\n\nglobal_connection = None\n\ntry:\n    global_connection = pymysql.connect(host=database_host,\n                                        user=database_user,\n                                        password=database_password,\n                                        charset='utf8',\n                                        cursorclass=pymysql.cursors.DictCursor)\n    global_connection.select_db(database_name)\nexcept pymysql.err.InternalError as err:\n    create_cursor = global_connection.cursor()\n    try:\n        sql_create_database = \"CREATE DATABASE \" + database_name \\\n                              + \" DEFAULT CHARACTER SET utf8\"\n        create_cursor.execute(sql_create_database)\n    except pymysql.err.ProgrammingError as err:\n        print(err)\n        print(\"Could not create database: \" + database_name)\n        sys.exit(123)\n# Get the list of tables already in the database so they are not re-created.\nglobal_connection.select_db(database_name)\nsql_get_tables = 'SHOW TABLES'\ncursor_get_tables = global_connection.cursor()\ncursor_get_tables.execute(sql_get_tables)\nresult_get_tables = cursor_get_tables.fetchall()\nfor each_table in result_get_tables:\n    list_of_tables.append(each_table['Tables_in_' + database_name])\nfor a_value in list_of_tables:\n    print(a_value)\n\nfor root, dirs, files in os.walk(rootDir):\n    for file in fnmatch.filter(files, 'log'):\n        root_elements = root.split('/')\n        job_name = root_elements[len(root_elements)-3]\n        if job_name not in fileLocations.keys():\n            fileLocations[job_name] = []\n        fileLocations[job_name].append(os.path.join(root, file))\nprint(fileLocations.keys())\ndisplayTime = DisplayTimeElapsed(interval=1, inputData=\"Check Time : \")\n\nfor each_a in fileLocations:\n    print(each_a)\ntest = []\n\"\"\"\nfor i in range(4):\n    test.append(DisplayFiles())\n\"\"\"\nfor each_job in fileLocations.keys():\n    test.append(DisplayFiles(each_job))\n","sub_path":"loginsert_with_thread_mysql.py","file_name":"loginsert_with_thread_mysql.py","file_ext":"py","file_size_in_byte":7738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
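The log-archiving record above originally leaned on the Python 2-only 'zlib' string codec; the rewrite stores logs by compressing and base64-encoding explicitly. The round trip in isolation, with a made-up sample log:

import base64
import zlib

log_data = "Started build #42\nFinished: SUCCESS\n"  # made-up sample log

# Compress, then base64-encode, so the blob is safe to store in a text column.
stored = base64.b64encode(zlib.compress(log_data.encode('utf8')))

# Reverse both steps to recover the original log.
restored = zlib.decompress(base64.b64decode(stored)).decode('utf8')
assert restored == log_data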
error : \" + jobname_str + \":\"+buildNumber)\n print(err)\n connection.commit()\n connection.close()\n except pymysql.err.InternalError as err:\n print(err)\n print(\"Please check a database : \" + database_name)\n \"\"\"\n for line in f:\n i+=1\n line=line.replace('\\n','')\n input_coll.insert({\"jobName\":job_name,\n \"buildNumber\":buildNumber,\n \"logLine\":i,\"logData\":line})\n\"\"\"\ntopDir = '/vol/users/allessunjoo.park/jobs'\n\nglobal_connection = ''\n\ntry:\n global_connection = pymysql.connect(host=database_host,\n user=database_user,\n password=database_password,\n charset='utf8',\n cursorclass=pymysql.cursors.DictCursor)\n global_connection.select_db(database_name)\nexcept pymysql.err.InternalError as err:\n create_cursor = global_connection.cursor()\n try:\n sql_create_database = \"CREATE DATABASE \" + database_name\\\n + \" DEFAULT CHARACTER SET utf8\"\n create_cursor.execute(sql_create_database)\n except pymysql.err.ProgrammingError as err:\n print(err)\n print(\"LINE : %s\" % 111)\n sys.exit(123)\n\"\"\"\nGet list of tables in a database and add them on\n\"\"\"\nglobal_connection.select_db(database_name)\nsql_get_tables = 'SHOW TABLES'\ncursor_get_tables = global_connection.cursor()\ncursor_get_tables.execute(sql_get_tables)\nresult_get_tables = cursor_get_tables.fetchall()\nfor each_table in result_get_tables:\n list_of_tables.append(each_table['Tables_in_'+database_name])\nfor a_value in list_of_tables:\n print(a_value)\n\n\n\n\n\nfor root,dirs,files in os.walk(rootDir):\n for file in fnmatch.filter(files,'log'):\n root_elements = root.split('/')\n job_name = root_elements[len(root_elements)-3]\n if not job_name in fileLocations.keys():\n fileLocations[job_name] = []\n fileLocations[job_name].append(os.path.join(root,file))\nprint(fileLocations.keys())\ndisplayTime = DisplayTimeElapsed(interval=1,inputData=\"Check Time : \")\n\nfor each_a in fileLocations:\n print(each_a)\ntest = []\n\"\"\"\nfor i in range(4):\n test.append(DisplayFiles())\n\"\"\"\nfor each_job in fileLocations.keys():\n test.append(DisplayFiles(each_job))\n","sub_path":"loginsert_with_thread_mysql.py","file_name":"loginsert_with_thread_mysql.py","file_ext":"py","file_size_in_byte":7738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"364584439","text":"import argparse\n\n#\ncmd_atgs_list = \" add \".split()\n\n# setup\nparser = argparse.ArgumentParser()\nparser.add_argument(\"echo\")\n\n# args parser\nargs = parser.parse_args(cmd_atgs_list)\n\n# handle\nif args.echo:\n print(args.echo)","sub_path":"python-argparse-string.py","file_name":"python-argparse-string.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"396482265","text":"# Copyright (C) 2015 Chris Wilen\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
+{"seq_id":"396482265","text":"# Copyright (C) 2015 Chris Wilen\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program.  If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"\n### BEGIN NODE INFO\n[info]\nname = ADR1\nversion = 1.3.2\ndescription = Controls ADR1. It can be connected to by adr_client.py or other labrad clients to control the ADR with a GUI, etc.\ninstancename = ADR1\n\n[startup]\ncmdline = %PYTHON% %FILE%\ntimeout = 20\n\n[shutdown]\nmessage = 987654321\ntimeout = 20\n### END NODE INFO\n\"\"\"\n\nfrom adr_server import *\nimport sys\n\nif __name__ == \"__main__\":\n    if '-a' not in sys.argv:\n        sys.argv.append('-a')\n        sys.argv.append('ADR1')\n    __server__ = ADRServer(sys.argv)\n    util.runServer(__server__)","sub_path":"adr/adr1.py","file_name":"adr1.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"247913886","text":"# -*- coding: utf-8 -*-\n\"\"\"\nModule pyelegantsdds.tools.sddsutils\n=================================================================\n\nA module containing a list of sdds commands.\n\n\"\"\"\nimport os\nimport subprocess\nimport time\nfrom itertools import product\n\nimport numpy as np\nimport pandas as pd\nfrom dask import dataframe as dd\nfrom termcolor import colored\n\n\ndef sddsconvert2ascii(sif, filename):\n    \"\"\"\n    Convert sdds binary file to ascii.\n\n    Parameters:\n    -----------\n    sif: str\n        Singularity executable container where sdds is installed.\n\n    filename: str\n        Filename of the file to convert.\n\n    Returns:\n    --------\n    None\n        A new file \"filename.txt\" is created in ascii format.\n    \"\"\"\n    subprocess.run(f\"{sif} sddsconvert -ascii {filename} {filename}.txt\", shell=True)\n\n\ndef sddsconvert2binary(sif, filename):\n    \"\"\"\n    Convert sdds ascii file to binary.\n\n    Parameters:\n    -----------\n    sif: str\n        Singularity executable container where sdds is installed.\n\n    filename: str\n        Filename of the file to convert.\n\n    Returns:\n    --------\n    None\n        A new file \"filename.bin\" is created in binary format.\n\n    \"\"\"\n    subprocess.run(f\"{sif} sddsconvert -binary {filename} {filename}.bin\", shell=True)\n\n\ndef getParameterList(parameterlistfile):\n    \"\"\"\n    Method to read back the parameter list,\n    extracted with sddsextractparameternames.\n\n    Parameters:\n    -----------\n    parameterlistfile: str\n        Filename of the file where the parameter list is\n        stored.\n\n    Returns:\n    --------\n    parameterlist: List[str]\n        List of the parameter names.\n    \"\"\"\n    with open(parameterlistfile, \"r\") as f:\n        lines = f.read().splitlines()\n\n    return [line for line in lines]\n\n\ndef sddsextractparameternames(sif, elefma, ext=\"-001.w1\"):\n    \"\"\"\n    Extract the parameter names from an SDDS file.\n\n    Parameters:\n    -----------\n    sif: str\n        Singularity executable container where sdds is installed.\n    elefma: str\n        Elegant .ele base name (filename of the .ele file without the extension).\n    ext: str\n        Extension of the file, format is \"elefma{ext}\", usually a watchpoint extension as\n        in the default.\n\n    Returns:\n    --------\n    filename: str\n        Filename where the parameter list is stored.\n    parameterlist: List[str]\n        List of extracted parameter names.\n\n    \"\"\"\n    subprocess.run(\n        f\"{sif} sddsquery -parameterList \" f\"{elefma}{ext} > {elefma}_parameterlist.txt\",\n        shell=True,\n    )\n\n    filename = f\"{elefma}_parameterlist.txt\"\n\n    return filename, getParameterList(filename)\n\n\ndef getColumnList(columnlistfile):\n    \"\"\"\n    Method to read back the column list,\n    extracted with sddsextractcolumnnames.\n\n    Parameters:\n    -----------\n    columnlistfile: str\n        Filename of the file where the column list is\n        stored.\n\n    Returns:\n    --------\n    columnlist: List[str]\n        List of the column names.\n    \"\"\"\n    with open(columnlistfile, \"r\") as f:\n        lines = f.read().splitlines()\n\n    return [line for line in lines]\n\n\ndef sddsextractcolumnnames(sif, elefma, ext=\"-001.w1\"):\n    \"\"\"\n    Extract the column names from an SDDS file.\n\n    Parameters:\n    -----------\n    sif: str\n        Singularity executable container where sdds is installed.\n    elefma: str\n        Elegant .ele base name (filename of the .ele file without the extension).\n    ext: str\n        Extension of the file, format is \"elefma{ext}\", usually a watchpoint extension as\n        in the default.\n\n    Returns:\n    --------\n    filename: str\n        Filename where the column list is stored.\n    columnlist: List[str]\n        List of extracted column names.\n\n    \"\"\"\n    subprocess.run(\n        f\"{sif} sddsquery -columnList \" f\"{elefma}{ext} > {elefma}_columnlist.txt\", shell=True\n    )\n\n    filename = f\"{elefma}_columnlist.txt\"\n\n    return filename, getColumnList(filename)\n\n\ndef sddsextractparametervalues():\n    \"\"\"\n    Extract the parameter values from an sdds file (not implemented yet).\n    \"\"\"\n    pass\n\n\ndef sddsextractcolumnvalues(sif, elefma, cols=\"x,xp,y,yp,t,p,dt,particleID\", ext=\"-001.w1\"):\n    \"\"\"\n    Extract the column values from an sdds file.\n\n    Parameters:\n    -----------\n    sif: str\n        Singularity executable container where sdds is installed.\n    elefma: str\n        Elegant .ele base name (filename of the .ele file without the extension).\n    cols: str\n        String representation of the list of columns to extract the values for.\n    ext: str\n        Extension of the file, format is \"elefma{ext}\", usually a watchpoint extension as\n        in the default.\n\n    Returns:\n    --------\n    filename: str\n        Filename where the values are stored.\n\n    \"\"\"\n    subprocess.run(\n        f\"{sif} sdds2stream -col={cols} \" f\"{elefma}{ext} > {elefma}_particle_data.txt\", shell=True\n    )\n    return f\"{elefma}_particle_data.txt\"\n\n\n
It adds a column \"step\" such that the resulting\n coordinate table can be grouped per \"step\" and \"particleID\".\n This allows to study individual tracked particles for individual vary\n steps.\n\n Parameters:\n -----------\n sif : str\n Singularity executable container where sdds is installed.add()\n basename_binary : str\n name of the file to be processed\n\n Returns:\n --------\n filename : str\n Filename where the processed data is stored.\n\n \"\"\"\n subprocess.run(\n f\"{sif} sddsprocess -define=column,step,Step {basename_binary} processed_{basename_binary}\",\n check=True,\n shell=True,\n )\n return f\"processed_{basename_binary}\"\n\n\ndef generate_scan_dataset(sif, datasetdict, filepath):\n \"\"\"\n Generates a file called \"scan.sdds\" containing columns of values\n to be used by elegant to scan over using vary_element method.\n\n Parameters:\n -----------\n datadict: dict\n dictionary where the keys are the column headers and values are list of values to scan over\n Note: all dict values need to have the same length\n\n filepath: str\n path where the simulation will be run (i.e where ele and lte files are)\n\n Returns:\n --------\n None\n Creates file scan.sdds in the filepath to be used in the simulations.add()\n\n \"\"\"\n # get current working dir to be able to get back\n currdir = os.getcwd()\n\n # change to simulation dir\n os.chdir(filepath)\n print(\"File path used: {}\".format(filepath))\n\n # create scan.sdds\n cmd = f\"{sif} sddsmakedataset scan.sdds \"\n\n for k, v in datasetdict.items():\n cmd += f\"-column={k},type=double -data=\" + \",\".join([str(vv) for vv in v]) + \" \"\n\n subprocess.run(cmd, check=True, shell=True)\n\n # change back to original working dir\n os.chdir(currdir)\n\n\ndef sddsplot(\n sif,\n filepath,\n columnNames=[\"x\", \"xp\"],\n markerstyle=\"sym\",\n vary=\"subtype\",\n scalemarker=1,\n fill=True,\n order=\"spectral\",\n split=\"page\",\n scale=\"0,0,0,0\",\n):\n \"\"\"\n Method to generate sdds plot.\n\n Parameters:\n -----------\n sif:\n filepath\n columnNames\n markerstyle\n vary\n scalemarker\n \"\"\"\n if fill:\n strfill = \",fill\"\n else:\n strfill = \"\"\n cmd = f\"{sif} sddsplot -columnNames={','.join(columnNames)} {filepath} \"\n cmd += f\"-graph={markerstyle},vary={vary}{strfill},scale={str(scalemarker)} -order={order} -split={split} -scale={scale}\"\n subprocess.run(cmd, check=True, shell=True)\n\n\ndef readParticleData(coordfile, collist, simtype=\"spt\", vary=False):\n \"\"\" \"\"\"\n data = dd.read_csv(coordfile, delimiter=\" \", names=collist, header=None)\n if vary:\n grouped = data.groupby(by=\"step\")\n\n def f(group):\n return group.join(pd.DataFrame({\"Turn\": group.groupby(\"particleID\").cumcount() + 1}))\n\n data = grouped.apply(f)\n\n elif simtype == \"spt\":\n data[\"Turn\"] = data.groupby(\"particleID\").cumcount() + 1\n\n return data\n","sub_path":"pyelegantsdds/tools/sddsutils.py","file_name":"sddsutils.py","file_ext":"py","file_size_in_byte":8031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"124306417","text":"# coding: utf-8\nimport pytest\n\nfrom app.main import create_app\nfrom app.storage.models import db\nfrom app.storage.documents import mongo\n\n\n@pytest.fixture()\ndef testapp(request):\n app = create_app('testing')\n client = app.test_client()\n app_context = app.app_context()\n app_context.push()\n\n db.create_all()\n\n def setup():\n mongo.db.directors.remove()\n mongo.db.mconfig.remove()\n\n def teardown():\n db.session.remove()\n 
+{"seq_id":"124306417","text":"# coding: utf-8\nimport pytest\n\nfrom app.main import create_app\nfrom app.storage.models import db\nfrom app.storage.documents import mongo\n\n\n@pytest.fixture()\ndef testapp(request):\n    app = create_app('testing')\n    client = app.test_client()\n    app_context = app.app_context()\n    app_context.push()\n\n    db.create_all()\n\n    def setup():\n        mongo.db.directors.remove()\n        mongo.db.mconfig.remove()\n\n    def teardown():\n        db.session.remove()\n        mongo.db.directors.remove()\n        mongo.db.mconfig.remove()\n        db.drop_all()\n        app_context.pop()\n\n    # Start each test from clean mongo collections.\n    setup()\n\n    request.addfinalizer(teardown)\n    return client\n","sub_path":"framework-banchmark/flask/cloud_director/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"432414732","text":"\nimport scrapy\nfrom scrapy.loader import ItemLoader\nimport os\n# urlparse is used below as a module (urlparse.urljoin), so import the module\n# itself: Python 3's urllib.parse under its Python 2 name.\ntry:\n    import urllib.parse as urlparse\nexcept ImportError:\n    import urlparse\nimport json\nimport logging\nimport re\nfrom lxml import html\nfrom goodreads.items import BookListEditions\nfrom goodreads.items import BookEditionItem\n\nclass EditionSpider(scrapy.Spider):\n    name = \"edition_extractor\"\n    list_editions = BookListEditions()\n    list_editions['dic'] = {}\n\n    if os.path.exists('links.json'):\n        with open('links.json', 'r') as json_file:\n            data = json.load(json_file)\n        start_urls = data[0]['bookUrls']\n\n    def parse(self, response):\n        editions = {'name': ''}\n        editions_page = response.xpath(\"//div[@class='otherEditionsActions']/a[@class='actionLinkLite'][1]/@href\").extract()\n        if len(editions_page) > 0:\n            url = urlparse.urljoin(response.url, editions_page[0])\n            #url = urlparse.urljoin(url,'?sort=num_ratings')\n            request = scrapy.Request(url, callback=self.parse_editions_url, meta={'editions': editions})\n            yield request\n\n    def parse_editions_url(self, response):\n        name = response.css('div.mainContentFloat h1 a::text').extract_first().strip()\n        editions = response.meta['editions']\n        all_books_data = {}\n        if name not in editions['name']:\n            editions['name'] = name\n            editions['urls'] = []\n\n        for book_page in response.css('a.bookTitle').xpath('@href'):\n            editions['urls'].append(urlparse.urljoin(response.url, book_page.extract()))\n\n        next_page = response.xpath(\"//*[@rel='next']/@href\").extract_first()\n\n        if next_page is not None and (len(editions['urls']) <= 200):\n            yield response.follow(next_page, callback=self.parse_editions_url, meta={'editions': editions})\n        else:\n            print(editions['name'])\n            print(len(editions['urls']))\n            for link in editions['urls']:\n                request = scrapy.Request(link, callback=self.parse_editions_data, meta={'all_books_data': all_books_data, 'name': editions['name']})\n                yield request\n            #yield editions\n            editions = {}\n\n    def parse_editions_data(self, response):\n        name = response.xpath('//*[@id=\"bookTitle\"]//text()').extract_first()\n        original_name = response.meta['name']\n        #all_books_data = response.meta['all_books_data']\n        language = response.xpath('//*[@itemprop=\"inLanguage\"]//text()').extract_first()\n        ratingText = response.xpath('//*[@id=\"bookMeta\"]/script').get()\n        values = self.extractRatingAndNumOfRaters(ratingText)\n        edition = BookEditionItem(name=name, original_name=original_name, language=language, averageRating=values['rating'], numOfRaters=values['numOfRaters'])\n\n        #if(original_name not in all_books_data):\n        #    all_books_data[original_name] = []\n\n        #all_books_data[original_name].append(edition)\n        #yield all_books_data\n        if int(values['numOfRaters']) > 100 and language is not None:\n            yield edition\n        else:\n            yield\n\n    def extractRatingAndNumOfRaters(self, ratingText):\n        values = {}\n        text = ratingText.rstrip().replace('\\n', '').replace('\\\\','').replace('//', '').replace(' ', '')\n        new = re.search('
', text).group()\n        raw = new.encode('utf8')\n        tree = html.fromstring(raw)\n        rating = tree.xpath('//*[@id=\"moreBookData\"]')[0][2][1][1][0].text\n        values['rating'] = rating\n        numOfRaters = tree.xpath('//*[@id=\"moreBookData\"]')[0][2][1][1][1].text\n        values['numOfRaters'] = numOfRaters\n        return values","sub_path":"goodreads/goodreads/spiders/editionExtractor.py","file_name":"editionExtractor.py","file_ext":"py","file_size_in_byte":3880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
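The rating extraction at the end of the spider above indexes into an lxml tree by child position; the same pattern on a self-contained, made-up HTML fragment:

from lxml import html

snippet = b'<div id="moreBookData"><span>4.12</span><span>1024</span></div>'  # made-up fragment
tree = html.fromstring(snippet)

# xpath() returns a list of matching elements; index into it, then into its
# children by position, just as the spider does.
node = tree.xpath('//*[@id="moreBookData"]')[0]
rating, num_raters = node[0].text, node[1].text
print(rating, num_raters)  # -> 4.12 1024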