diff --git "a/3007.jsonl" "b/3007.jsonl"
new file mode 100644
--- /dev/null
+++ "b/3007.jsonl"
@@ -0,0 +1,657 @@
+{"seq_id":"516427612","text":"# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Handlers related to Users.\"\"\"\nimport httplib\nimport logging\n\nfrom upvote.gae.datastore.models import base as base_db\nfrom upvote.gae.modules.upvote_app.api import monitoring\nfrom upvote.gae.modules.upvote_app.api.handlers import base\nfrom upvote.gae.shared.common import handlers\nfrom upvote.gae.shared.common import user_map\nfrom upvote.gae.shared.common import xsrf_utils\nfrom upvote.shared import constants\n\n\nclass UserQueryHandler(base.BaseQueryHandler):\n \"\"\"Handler for querying users.\"\"\"\n\n MODEL_CLASS = base_db.User\n HAS_INTEGRAL_ID_TYPE = False\n\n @property\n def RequestCounter(self):\n return monitoring.user_requests\n\n @base.RequireCapability(constants.PERMISSIONS.VIEW_OTHER_USERS)\n @handlers.RecordRequest\n def get(self):\n self._Query()\n\n\nclass UserHandler(base.BaseHandler):\n \"\"\"Handler for interacting with individual users.\"\"\"\n\n def get(self, user_id=None): # pylint: disable=g-bad-name\n logging.debug('UserHandler GET method called with ID: %s', user_id)\n if not user_id or self.user.email == user_id:\n user = self.user\n else:\n user = self._get_another_user(user_id)\n if user:\n user_info = user.to_dict()\n user_info.update({\n 'name': user.nickname,\n 'permissions': user.permissions,\n 'is_admin': user.is_admin,\n })\n self.respond_json(user_info)\n else:\n self.abort(httplib.NOT_FOUND, explanation='User not found')\n\n @base.RequireCapability(constants.PERMISSIONS.VIEW_OTHER_USERS)\n def _get_another_user(self, user_id):\n return base_db.User.GetById(user_id)\n\n @base.RequireCapability(constants.PERMISSIONS.EDIT_USERS)\n @xsrf_utils.RequireToken\n def post(self, user_id):\n \"\"\"Post handler for users.\"\"\"\n\n logging.debug('UserHandler POST method called with ID: %s', user_id)\n email_addr = user_map.UsernameToEmail(user_id)\n\n new_roles = self.request.get_all('roles')\n base_db.User.SetRoles(email_addr, new_roles)\n\n user = base_db.User.GetOrInsert(email_addr=email_addr)\n self.respond_json(user)\n","sub_path":"upvote/gae/modules/upvote_app/api/handlers/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":2627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"236936243","text":"\"\"\" \nPython Cognate Matrix Class\nStuart Bradley - 5931269\n29-07-2015\n\nThis class is a container for a set of language classes, as well as \nmethods that act upon these languages. \n\nIt it's current state, 80 randomized languages are produced. \n\nLanguage evolution can occur under three models:\n- CMTC\n- Covarion\n- Stollo-Dollo\n- Rate Variable\n\"\"\"\n\nfrom Language import Language\nimport random\nfrom bisect import bisect\nimport math\n\nclass CognateSet:\n\t# Produces a cognate matrix, with it's sequence of binary cognates.\n\tdef __init__(self, langs=[]):\n\t\tself.language_list = langs\n\t\tself.stollo_length = len(self.language_list[0].sequence)\n\n\t# Returns a language specified by it's name.\n\tdef find_lang(self,name):\n\t\tfor i in self.language_list:\n\t\t\tif i.name == name:\n\t\t\t\treturn i\n\n\t# Gets the binary vector for a particular cognate. \n\tdef get_cognate_set(self,n):\n\t\tc_s = []\n\t\ttry: \n\t\t\tfor language in language_list:\n\t\t\t\tc_s.append(language.sequence[n])\n\t\texcept IndexError:\n\t\t\treturn \n\n\t# Produces an exponentially distributed random variable.\n\tdef exponential(self,rate):\n\t\treturn -math.log(random.random())/rate\n\n\tdef weighted_random(self, items, probabilities):\n\t\tcdf = [probabilities[0]]\n\t\tfor i in xrange(1, len(probabilities)):\n\t\t\tcdf.append(cdf[-1] + probabilities[i])\n\t\trandom_ind = bisect(cdf,random.random())\n\t\treturn items[random_ind]\n\n\n\t# Mutates language traits according to the reversible \n\t# continuous time Markov chain model. \n\t# Given a probability matrix.\n\tdef mutate_language_GTR_timed(self, lang, Q, T):\n\t\tnew_lang = Language(seq=lang.sequence)\n\t\tfor i in range(len(new_lang.sequence)):\n\t\t\trate = Q.get_exp_rate(new_lang.sequence[i])\n\t\t\tt = self.exponential(-rate)\n\t\t\twhile t < T:\n\t\t\t\tmutatable_items = list(Q.items)\n\t\t\t\tmutatable_items.remove(new_lang.sequence[i])\n\t\t\t\tnew_lang.sequence[i] = self.weighted_random(mutatable_items, Q.get_rate_probs(new_lang.sequence[i]))\n\t\t\t\trate = Q.get_exp_rate(new_lang.sequence[i])\n\t\t\t\tt += self.exponential(-rate)\n\t\tself.language_list.append(new_lang)\n\t\treturn new_lang\n\n\t# Mutates language traits according to the reversible \n\t# continuous time Markov chain model. 
\n\t# Given a probability matrix.\n\t# Differs from above by not computing each individual mutation.\n\tdef mutate_language_GTR_timed_2(self, lang, Q, T):\n\t\tnew_lang = Language(seq=lang.sequence)\n\t\tQ.create_pMatrix(T)\n\t\tfor i in range(len(new_lang.sequence)):\n\t\t\tnew_lang.sequence[i] = self.weighted_random(Q.items, Q.get_p_row(new_lang.sequence[i]))\n\t\tself.language_list.append(new_lang)\n\t\treturn new_lang\n\n\t# Mutates language traits according to the Stochastic-Dollo model.\n\t# Stops once time is exceeded for each trait.\n\tdef mutate_language_stochastic_dollo_timed(self, lang, b, d, T):\n\t\tlang = Language(seq=lang.sequence)\n\t\t# Rand_exp at rate b + d * k\n\t\tt = self.exponential(b + d*lang.get_births())\n\t\t# Total rate for uniform generation.\n\t\twhile t < T:\n\t\t\t# Death:\n\t\t\tif self.weighted_random(['death', 'birth'], [(d * lang.get_births() /(b+d*lang.get_births())),(b/(b+d*lang.get_births()))]) == 'death':\n\t\t\t\t# Pick a random site that is not dead, and kill it.\n\t\t\t\twhile True:\n\t\t\t\t\ti = random.randint(0, len(lang.sequence) - 1)\n\t\t\t\t\tif lang.sequence[i] != 0:\n\t\t\t\t\t\tlang.sequence[i] = 0\n\t\t\t\t\t\tbreak\n\t\t\t\t# Birth:\n\t\t\telse:\n\t\t\t\tlang.sequence.append(1)\n\t\t\t\tself.stollo_length += 1\n\t\t\tt += self.exponential(b + d*lang.get_births())\t\n\t\tself.language_list.append(lang)\n\t\treturn lang","sub_path":"Utility Classes/CognateSet.py","file_name":"CognateSet.py","file_ext":"py","file_size_in_byte":3395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"248917583","text":"\nimport nltk\nimport pandas as pd\nfrom nltk.corpus import stopwords\nimport matplotlib.pyplot as plt\nfrom nltk.corpus import sentiwordnet as swn\n\n# nltk.download('sentiwordnet')\n\n\n# class positive_negative():\n# def __init__(self):\n# self.output\n\nclass Sentiment():\n def get_output(self,company):\n if company==\"google\":\n return \"../backend/interview_sentiment/resurces/pos_neg_google.JPG\"\n elif company==\"apple\":\n return \"../backend/interview_sentiment/resurces/pos_neg_apple.JPG\"\n elif company == \"microsoft\":\n return \"../backend/interview_sentiment/resurces/pos_neg_microsoft.JPG\"\n\n\n\n def pre_processing(self,df):\n # tokenize\n data = []\n for row in df.values:\n tokenize_text = nltk.word_tokenize(row[0])\n data.append(tokenize_text)\n\n # convert to lower case\n data_after_lower = []\n for content in data:\n lower_content = list(map(lambda word: word.lower(), content))\n data_after_lower.append(lower_content)\n\n # stopwords removel\n data_without_stop_words = []\n stop_words = set(stopwords.words('english'))\n for word in [\".\", \",\", \"(\", \")\", \"<\", \">\", \"br\", \"!\", \"/\", \"--\", \"n't\", \"'s\", \"''\", \"?\", \"...\", \"``\", \":\", \"-\", \"'\",\n \"would\", \";\", \"*\"]:\n stop_words.add(word)\n for content in data_after_lower:\n filtered_content = [w for w in content if not w in stop_words]\n data_without_stop_words.append(filtered_content)\n\n # lemmatization\n wnl = nltk.WordNetLemmatizer()\n clean_data = []\n for content in data_without_stop_words:\n lemmatize_content = [wnl.lemmatize(w) for w in content]\n clean_data.append(lemmatize_content)\n\n return clean_data\n\n\n def classify_data(self,clean_data):\n tagged_list = []\n final_docs_score = []\n score_list = []\n\n # Create POS tagging for each token in each doc\n tagged_list = []\n for content in clean_data:\n tagged_list.append(nltk.pos_tag(content))\n\n for idx, doc in enumerate(tagged_list):\n score_list.append([])\n for idx2, t in enumerate(doc): # t[0] word, t[1] pos tag\n newtag = ''\n if t[1].startswith('NN'):\n newtag = 'n'\n elif t[1].startswith('JJ'):\n newtag = 'a'\n elif t[1].startswith('V'):\n newtag = 'v'\n elif t[1].startswith('R'):\n newtag = 'r'\n else:\n newtag = ''\n if (newtag != ''):\n synsets = list(swn.senti_synsets(t[0], newtag))\n score = 0\n if (len(synsets) > 0):\n for syn in synsets:\n score += syn.pos_score() - syn.neg_score()\n score_list[idx].append(score / len(synsets)) # add score of each term in doc\n\n # Create final score to each doc(positive or negative)\n for score_sent in score_list:\n final_docs_score.append(sum([word_score for word_score in score_sent]) / len(score_sent))\n return final_docs_score\n\n\n # call functions\n def plot_pie_chart(self,num_positive, num_negative):\n # Pie chart, where the slices will be ordered and plotted counter-clockwise:\n labels = 'Positive reviews', 'Negative reviews'\n sizes = [num_positive, num_negative]\n explode = (0, 0) # only \"explode\" the 2nd slice (i.e. 
'Hogs')\n\n fig1, ax1 = plt.subplots()\n ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',\n shadow=True, startangle=90)\n ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n\n plt.show()\n\n\n def percentage(self,part, whole):\n return 100 * float(part) / float(whole)\n\n\n def summarize_reviews(self,reviews_score):\n positive = 0\n negative = 0\n for score in reviews_score:\n if score > 0:\n positive += 1\n else:\n negative += 1\n print(\"The number of positive reviews is: \" + str(positive) + \"/\" + str(len(reviews_score)))\n print(\"The number of negative reviews is: \" + str(negative) + \"/\" + str(len(reviews_score)))\n\n print('\\n')\n\n print((\"The positive percentage number is: \" + str(round(self.percentage(positive, len(reviews_score)), 2)) + '%'))\n print((\"The negative percentage number is: \" + str(round(self.percentage(negative, len(reviews_score)), 2)) + '%'))\n\n self.plot_pie_chart(positive, negative)\n\nif __name__ == '__main__':\n path=r'../scrape_interviews/scraper_output/apple_softwareJobs_interviews.csv'\n\n interview_questions = pd.read_csv(path)\n reviews = interview_questions[['Interview']]\n reviews = reviews.dropna()\n sentiment = Sentiment()\n clean_data = sentiment.pre_processing(reviews)\n docs_score = sentiment.classify_data(clean_data)\n print(docs_score)\n sentiment.summarize_reviews(docs_score)\n\n","sub_path":"backend/interview_sentiment/positive_negative.py","file_name":"positive_negative.py","file_ext":"py","file_size_in_byte":5170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"132219471","text":"\"\"\"\nCommand line utility for GitHub daily work.\n\"\"\"\nimport setuptools\nimport yogit\n\nDEPENDENCIES = [\"click\", \"tabulate\", \"requests\", \"requests-toolbelt\", \"PyYAML>=5.1\", \"pyperclip\"]\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=yogit.__application__,\n version=yogit.__version__,\n author=\"Adrien Gavignet\",\n author_email=\"adrien.gavignet@gmail.com\",\n license=\"MIT\",\n description=\"Command line utility for GitHub daily work.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n keywords=\"git github utility branch pull requests\",\n url=\"https://github.com/hasboeuf/yogit\",\n packages=setuptools.find_packages(exclude=[\"*.tests\", \"*.tests.*\", \"tests\"]),\n classifiers=[\"Programming Language :: Python :: 3\", \"Operating System :: OS Independent\"],\n zip_safe=True,\n install_requires=DEPENDENCIES,\n entry_points={\"console_scripts\": [\"yogit=yogit.yogit.cli:main\"]},\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"219575403","text":"# 1.Two_Conv代表两个卷积层拼接\n# 2.downsample代表下采样层\n# 3.upsample代表上采样层\nclass SELayer(nn.Module):\n def __init__(self, channel, reduction=16):\n super(SELayer, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Sequential(\n nn.Linear(channel, channel // reduction, bias=False),\n nn.ReLU(inplace=True),\n nn.Linear(channel // reduction, channel, bias=False),\n nn.Sigmoid()\n )\n def forward(self, x):\n x1, x2, _, __ = x.size()\n y = self.avg_pool(x).view(x1, x2)\n y = self.fc(y).view(x1, x2, 1, 1)\n y_out = y.expand_as(x)#匹配x\n return x * y_out\nclass UNet(nn.Module):\n def __init__(self, n_channels, n_classes, bilinear=True):\n super(UNet, self).__init__()\n self.n_channels = n_channels\n self.n_classes = n_classes\n self.bilinear = bilinear\n self.inc = Two_Conv(n_channels, 64)\n self.downsample1 = downsample(64, 128)\n self.selayer1=SELayer(128)#插入方式,确保前后维度一致\n self.downsample2 = downsample(128, 256)\n self.downsample3 = downsample(256, 512)\n factor = 2 if bilinear else 1\n self.downsample4 = downsample(512, 1024 // factor)\n self.upsample1 = upsample(1024, 512 // factor, bilinear)\n self.upsample2 = upsample(512, 256 // factor, bilinear)\n self.upsample3 = upsample(256, 128 // factor, bilinear)\n self.upsample4 = upsample(128, 64, bilinear)\n self.outc = OutConv(64, n_classes)\n\n def forward(self, x):\n x1 = self.inc(x)\n x2 = self.downsample1(x1)\n x2 = self.selayer1(x2)#插入方式\n x3 = self.downsample2(x2)\n x4 = self.downsample3(x3)\n x5 = self.downsample4(x4)\n x = self.upsample1(x5, x4)\n x = self.upsample2(x, x3)\n x = self.upsample3(x, x2)\n x = self.upsample4(x, x1)\n logits = self.outc(x)\n return logits","sub_path":"cv/segmentation/unet_SELayer.py","file_name":"unet_SELayer.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"624786850","text":"import os\nimport PyPDF2\nfrom PIL import Image\nimport img2pdf\nimport math\n\nPAGES_EACH_PDF = 20\n\ndef jpg2pdf(jpgFilename, pdfFilename):\n img = Image.open(jpgFilename)\n pdf = img2pdf.convert(jpgFilename)\n pdfFile = open(pdfFilename, 'wb')\n pdfFile.write(pdf)\n img.close()\n pdfFile.close()\n\ndef mergepdfs(inputFilenameList, outputFilename):\n merger = PyPDF2.PdfFileMerger()\n for inputPdfFile in inputFilenameList:\n merger.append(inputPdfFile)\n merger.write(outputFilename)\n merger.close()\n\ndef jpg2pdfByFolder(folder):\n files = os.listdir(folder)\n pdfFileList = []\n for filename in files:\n if not filename.endswith('.jpg'):\n continue\n\n pdfFilename = filename.replace('.jpg', '.pdf')\n print(f'{filename} -> {pdfFilename}')\n jpg2pdf(os.path.join(folder, filename), os.path.join(folder, pdfFilename))\n pdfFileList.append(os.path.join(folder, pdfFilename))\n\n pdfFileList = sorted(pdfFileList, key=lambda x: int(os.path.basename(x).split('.')[0]))\n for i in range(1, math.ceil(len(pdfFileList) / PAGES_EACH_PDF) + 1):\n startIdx = (i - 1) * PAGES_EACH_PDF\n stopIdx = min(i * PAGES_EACH_PDF, len(pdfFileList))\n mergepdfs(pdfFileList[startIdx : stopIdx], os.path.join(folder, f'{folder}_{i}.pdf'))\n\ndef main():\n imgDir = os.path.join(os.getcwd(), 'data/')\n allDirs = sorted(os.listdir(imgDir))\n dirs = [f for f in allDirs if os.path.isdir(os.path.join(imgDir, f))]\n for dir in dirs:\n jpg2pdfByFolder(os.path.join(imgDir, dir))\n\n return\n\n\n# entrance\nif __name__ == \"__main__\":\n main()\n","sub_path":"jpg_to_pdf.py","file_name":"jpg_to_pdf.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"541911519","text":"# coding: utf-8\n\n# 白银数据相关\n\nfrom lib.Bis import Ag\nfrom toolkit import url, BisReqHandler, Session\nfrom datetime import datetime, timedelta\nimport json\nimport urllib2\n\n\ndef qpath(p):\n dc = {}\n for o in p.split('&'):\n kv = o.split('=')\n dc[kv[0]] = kv[1]\n return dc\n\n@url(r'/ag')\n@url(r'/ag/(\\d+)')\nclass Index(BisReqHandler):\n \"\"\"docstring for index\"\"\"\n def get(self, i=40):\n i = int(i)\n session = Session()\n dt_ln = datetime.now() - timedelta(minutes=i)\n q = session.query(Ag).filter(Ag.date_create > dt_ln)\n dd = []\n\n for a in q:\n d = a.date_create\n dic = qpath(a.source)\n v = float([ a for a in dic['gold'].split('|') if len(a)][0])\n t = [int(a) for a in dic['time'].split(':')]\n #dt = datetime(d.year, d.month, d.day, t[0], t[1], t[2]) - datetime(1970, 1, 1, 0, 0, 0)\n dt = datetime(d.year, d.month, d.day, t[0], t[1], t[2]).strftime('%Y-%m-%d %H:%M:%S')\n\n dd.append((dt, v))\n\n dd = sorted(dd, key=lambda x: x[0])\n\n self.render('ag', js=json.dumps(dd), title='Ag Chart')\n\n@url(r'/ag/m')\n@url(r'/ag/m/(\\d+)')\n@url(r'/ag/m/(\\d+)/(\\d+)')\nclass AgM(BisReqHandler):\n \"\"\"docstring for ag_m\"\"\"\n\n def get(self, i=5, c=40):\n i, c = int(i), int(c)\n session = Session()\n dt_ln = datetime.now() - timedelta(minutes=i*c)\n q = session.query(Ag).filter(Ag.date_create > dt_ln)\n from lib.arr_math import group\n\n dt0 = datetime(1970, 1, 1)\n\n ts = lambda d: int((d - dt0).total_seconds())\n\n def gk(ag):\n return ts(ag.date_create) / (i * 60)\n\n dic = group(q, gk)\n\n def avg_1(ag_s):\n ss = [ float(qpath(ag.source)['gold'].split('|')[1]) for ag in ag_s]\n return sum(ss) / len(ss)\n\n ds_str = lambda x: (dt0 + timedelta(minutes=x*i)).strftime('%Y-%m-%d %H:%M:%S')\n\n dd = [(ds_str(k), avg_1(arr)) for k, arr in dic.iteritems()]\n\n dd = sorted(dd, key=lambda x: x[0])\n\n self.render('ag', js=json.dumps(dd), title='Ag Chart')\n\n@url(r'/ag/corn')\nclass Corn(BisReqHandler):\n \"\"\"docstring for corn\"\"\"\n def get(self):\n sss = Session()\n\n source = urllib2.urlopen('http://quote.zhijinwang.com/xml/ag.txt').read()\n\n lst = sss.query(Ag).order_by(Ag.id.desc()).first()\n gold = source.split('gold=')[1]\n if gold == lst.source.split('gold=')[1]:\n self.write('1')\n return\n one = Ag(source=source, date_create=datetime.now())\n if one.source[:8] == 'time=23:' and one.date_create.hour == 0:\n # SELECT * FROM `bis_ag` where source like 'time=23:58%' or source like 'time=23:59%'\n # 保证跨天数据, 不会出现 time=23:59:56&gold... 2013-08-03 00:00:32 的情况\n # 将今天的秒数减去, 变成昨天最后1秒\n one.date_create -= timedelta(seconds=one.date_create.second + 1)\n sss.add(one)\n sss.commit()\n self.write('1')\n\n\n\n\n","sub_path":"action/ag.py","file_name":"ag.py","file_ext":"py","file_size_in_byte":3049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"295867149","text":"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Normal Distribution\"\"\"\nimport numpy as np\nfrom mindspore.ops import operations as P\nfrom mindspore.ops import composite as C\nfrom .distribution import Distribution\nfrom ._utils.utils import convert_to_batch, check_greater_equal_zero\nfrom ...common import dtype as mstype\nfrom ...context import get_context\n\nclass Normal(Distribution):\n \"\"\"\n Example class: Normal distribution.\n\n Args:\n mean (int, float, list, numpy.ndarray, Tensor, Parameter): mean of the Gaussian distribution.\n sd (int, float, list, numpy.ndarray, Tensor, Parameter): stddev of the Gaussian distribution.\n seed (int): seed to use in sampling. Default: 0.\n dtype (mindspore.dtype): type of the distribution. Default: mstype.float32.\n name (str): name of the distribution. Default: Normal.\n\n\n Note:\n Standard deviation should be greater than zero.\n\n Examples:\n >>> # To initialize a normal distribution of mean 3.0 and standard deviation 4.0\n >>> n = nn.Normal(3.0, 4.0, dtype=mstype.float32)\n >>> # The following create two independent normal distributions\n >>> n = nn.Normal([3.0, 3.0], [4.0, 4.0], dtype=mstype.float32)\n \"\"\"\n\n def __init__(self,\n mean=None,\n sd=None,\n seed=0,\n dtype=mstype.float32,\n name=\"Normal\"):\n \"\"\"\n Constructor of normal distribution.\n \"\"\"\n param = dict(locals())\n super(Normal, self).__init__(dtype, name, param)\n if mean is not None and sd is not None:\n self._mean_value = convert_to_batch(mean, self._broadcast_shape, dtype)\n self._sd_value = convert_to_batch(sd, self._broadcast_shape, dtype)\n check_greater_equal_zero(self._sd_value, \"Standard deviation\")\n else:\n self._mean_value = mean\n self._sd_value = sd\n self.seed = seed\n\n #ops needed for the class\n self.exp = P.Exp()\n self.add = P.TensorAdd()\n self.mul = P.Mul()\n self.sq = P.Square()\n self.log = P.Log()\n self.sqrt = P.Sqrt()\n self.realdiv = P.RealDiv()\n self.expm1 = P.Expm1() if get_context('device_target') == 'Ascend' else self._expm1_by_step\n self.shape = P.Shape()\n self.zeroslike = P.ZerosLike()\n self.const = P.ScalarToArray()\n\n def extend_repr(self):\n str_info = f'mean = {self._mean_value}, standard deviation = {self._sd_value}'\n return str_info\n\n def _expm1_by_step(self, x):\n \"\"\"\n Expm1 ops under GPU context.\n \"\"\"\n return self.add(self.exp(x), -1)\n\n def _mean(self, name='mean', mean=None, sd=None):\n \"\"\"\n Mean of the distribution.\n \"\"\"\n if name == 'mean':\n mean = self._mean_value if mean is None or sd is None else mean\n return mean\n return None\n\n def _sd(self, name='sd', mean=None, sd=None):\n \"\"\"\n Standard deviation of the distribution.\n \"\"\"\n if name in ('sd', 'var'):\n sd = self._sd_value if mean is None or sd is None else sd\n return sd\n return None\n\n def _log_likelihood(self, name, value, mean=None, sd=None):\n r\"\"\"\n 
Evaluate log probability.\n\n .. math::\n L(x) = -1* \\fract{(x - \\mu)^2}{2. * \\sigma^2} - \\log(\\sqrt(2* \\pi * \\sigma^2))\n \"\"\"\n if name in ('prob', 'log_prob'):\n mean = self._mean_value if mean is None else mean\n sd = self._sd_value if sd is None else sd\n unnormalized_log_prob = -1. * self.realdiv(self.sq(self.add(value, -1. * mean)),\n 2. * self.sq(sd))\n neg_normalization = -1. * self.log(self.sqrt(2. * np.pi * self.sq(sd)))\n return self.add(unnormalized_log_prob, neg_normalization)\n return None\n\n def _kl_loss(self, name, dist, mean_b, sd_b, mean_a=None, sd_a=None):\n r\"\"\"\n Evaluate Normal-Normal kl divergence, i.e. KL(a||b).\n\n Args:\n name (str): name of the funtion passed in from construct. Should always be \"kl_loss\".\n dist (str): type of the distributions. Should be \"Normal\" in this case.\n mean_b (Tensor): mean of distribution b.\n sd_b (Tensor): standard deviation distribution b.\n mean_a (Tensor): mean of distribution a. Default: self._mean_value.\n sd_a (Tensor): standard deviation distribution a. Default: self._sd_value.\n\n .. math::\n KL(a||b) = 0.5 * (\\fract{MEAN(a)}{STD(b)} - \\fract{MEAN(b)}{STD(b)}) ^ 2 +\n 0.5 * EXPM1(2 * (\\log(STD(a)) - \\log(STD(b))) - (\\log(STD(a)) - \\log(STD(b)))\n \"\"\"\n if name == 'kl_loss' and dist == 'Normal':\n mean_a = self._mean_value if mean_a is None else mean_a\n sd_a = self._sd_value if sd_a is None else sd_a\n diff_log_scale = self.add(self.log(sd_a), - self.log(sd_b))\n squared_diff = self.sq(self.add(self.realdiv(mean_a, sd_b), - self.realdiv(mean_b, sd_b)))\n return self.add(self.add(0.5 * squared_diff, 0.5 * self.expm1(2 * diff_log_scale)), - diff_log_scale)\n return None\n\n def _sample(self, name, shape=(), mean=None, sd=None):\n \"\"\"\n Sampling.\n\n Args:\n name (str): name of the function. Should always be 'sample' when passed in from construct.\n shape (tuple): shape of the sample. Default: ().\n mean (Tensor): mean of the samples. Default: self._mean_value.\n sd (Tensor): standard deviation of the samples. Default: self._sd_value.\n\n Returns:\n Tensor, shape is shape + batch_shape.\n \"\"\"\n if name == 'sample':\n mean = self._mean_value if mean is None else mean\n sd = self._sd_value if sd is None else sd\n batch_shape = self.shape(self.add(self.zeroslike(mean), self.zeroslike(sd)))\n sample_shape = shape + batch_shape\n mean_zero = self.const(0.0)\n sd_one = self.const(1.0)\n sample_norm = C.normal(sample_shape, mean_zero, sd_one, self.seed)\n sample = self.add(mean, self.mul(sample_norm, sd))\n return sample\n return None\n","sub_path":"mindspore/nn/distribution/normal.py","file_name":"normal.py","file_ext":"py","file_size_in_byte":6968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"418077745","text":"import math\r\nimport itertools as it\r\n\r\n#http://stackoverflow.com/questions/18833759/python-prime-number-checker\r\ndef is_prime(n):\r\n '''check if integer n is a prime'''\r\n\r\n # make sure n is a positive integer\r\n n = abs(int(n))\r\n\r\n # 0 and 1 are not primes\r\n if n < 2:\r\n return False\r\n\r\n # 2 is the only even prime number\r\n if n == 2: \r\n return True \r\n\r\n # all other even numbers are not primes\r\n if not n & 1: \r\n return False\r\n\r\n # range starts with 3 and only needs to go up \r\n # the square root of n for all odd numbers\r\n for x in range(3, int(n**0.5) + 1, 2):\r\n if n % x == 0:\r\n return False\r\n\r\n return True\r\n\r\nwith open(\"output2.txt\", \"w\") as out:\r\n\tnum = 16\r\n\tn = 50\r\n\tr = [[1,0],[0,1]]*8\r\n\tlista = []\r\n\r\n\tout.write(\"Case #1:\\n\")\r\n\tcont = 0\r\n\tfor item in it.product(*r):\r\n\t\tprint('1')\r\n\t\tif item[0] == 1 and item[len(item)-1] == 1:\r\n\t\t\tsoma = []\r\n\t\t\tfor i in range(2,11):\r\n\t\t\t\tsomap = 0\r\n\t\t\t\tj = len(item)-1\r\n\t\t\t\tfor ind in item:\r\n\t\t\t\t\tif ind == 1:\r\n\t\t\t\t\t\tsomap += ind*(i**j)\r\n\t\t\t\t\tj -= 1\r\n\r\n\t\t\t\tif somap == 0:\r\n\t\t\t\t\tbreak\r\n\t\t\t\tif is_prime(somap):\r\n\t\t\t\t\tbreak\r\n\r\n\t\t\t\tsoma.append(somap)\r\n\r\n\t\t\tif len(soma) == 9:\r\n\t\t\t\tfinal = []\r\n\t\t\t\tfor s in soma:\r\n\t\t\t\t\tfor r in range(2,s-1):\r\n\t\t\t\t\t\tif s%r == 0:\r\n\t\t\t\t\t\t\tfinal.append(r)\r\n\t\t\t\t\t\t\tprint(soma)\r\n\t\t\t\t\t\t\tprint('final',final)\r\n\t\t\t\t\t\t\tbreak\r\n\t\t\t\tprint('2')\r\n\t\t\t\tif len(final) == 9:\r\n\t\t\t\t\tf = ''\r\n\t\t\t\t\tfor u in final:\r\n\t\t\t\t\t\tf += str(u)\r\n\t\t\t\t\t\tf += ' '\r\n\t\t\t\t\th = ''\r\n\t\t\t\t\tfor e in item:\r\n\t\t\t\t\t\th += str(e)\r\n\t\t\t\t\tif not h in lista:\r\n\t\t\t\t\t\tlista.append(h)\r\n\t\t\t\t\t\tout.write(\"{0} {1}\\n\".format(h,f))\r\n\r\n\t\t\t\t\t\tcont +=1\r\n\t\t\t\t\t\tprint('3')\r\n\t\t\t\t\tif cont == n:\r\n\t\t\t\t\t\tbreak\r\n\r\n","sub_path":"codes/CodeJamCrawler/16_0_3_neat/16_0_3_MatheusDMD_cj3.py","file_name":"16_0_3_MatheusDMD_cj3.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"546035103","text":"\n\n#calss header\nclass _ATLAS():\n\tdef __init__(self,): \n\t\tself.name = \"ATLAS\"\n\t\tself.definitions = [u'a book containing maps: ', u'a book containing maps showing where particular things are made, found, etc.: ', u'the first vertebra (= bone) of the spine, that supports the skull: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_atlas.py","file_name":"_atlas.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"420535227","text":"import random\nimport numpy as np\nfrom natsort import natsorted\nimport os, sys, argparse, glob\nimport skimage.io as io\nimport sys\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torchvision.transforms as T\n\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, utils\n\nfrom ZebrDataset import ZebrDataset\nfrom Unet import *\nfrom Logger import Logger\nimport multiprocessing as mp\nfrom tqdm import tqdm\nimport time\n\n\n# torch.multiprocessing.set_start_method('spawn')\n\nLOG_PERIOD = 5\nLOG_DIR = 'log_dir/'\nCHECKPOINT_SAVE_PATH = 'checkpoints/'\nSAVE_PERIOD = 5\nINPUT_SHAPE = (1, 64, 64, 64)\nIN_CH = 1\nOUT_CH = 1\nFEATURES = [8, 16, 32, 64]\nBATCH_SIZE = 3\n\ndef read_im (paths, downsample=1):\n ret = []\n for path in paths:\n ret.append (io.imread (path)[::downsample,::downsample,::downsample])\n return ret\n\ndef save_checkpoint (state, path=CHECKPOINT_SAVE_PATH):\n # print ('Checkpoint saved')\n torch.save (state, path)\n\ndef train (train_data, n_epoc, loss_func, optimizer, lr_scheduler, i_iter=0):\n\n logger = Logger (LOG_DIR)\n\n for i_ipoc in range (n_epoc):\n pbar = tqdm (total=len (train_data), ascii=True)\n # print('ipoc ' + str (i_ipoc), ' len epoch ', str (len (train_data)))\n ipoc_loss = 0\n \n for i_batch, sample in enumerate (train_data):\n if i_batch == len (train_data):\n break\n pbar.update (1)\n raw = torch.tensor (sample['raw'], device=device, dtype=torch.float32) / 255.0\n target = torch.tensor (sample['lbl'], device=device, dtype=torch.float32) / 255.0\n pred = model (raw)\n \n loss = loss_func (pred, target)\n\n optimizer.zero_grad ()\n loss.backward ()\n optimizer.step ()\n\n ipoc_loss += loss.item () / len (train_data)\n lr_scheduler.step ()\n\n if i_batch == len (train_data) - 1 and i_ipoc % LOG_PERIOD == 0:\n sys.stdout.flush ()\n # print ('\\nWriting log')\n info = {'loss': ipoc_loss, 'learning_rate': lr_scheduler.get_lr () [0]}\n for tag, value in info.items ():\n logger.scalar_summary (tag, value, i_iter)\n\n raw = np.expand_dims (raw.detach ().cpu ().numpy() [:,0,:,:], -1)\n target = np.expand_dims (target.detach ().cpu ().numpy ()[:,0,:,:], -1)\n pred = np.expand_dims (pred.detach ().cpu ().numpy ()[:,0,:,:], -1)\n\n # print (raw.shape, target.shape, pred.shape)\n\n for tag, value in model.named_parameters ():\n tag = tag.replace ('.', '/')\n logger.histo_summary (tag, value.data.cpu ().numpy (), i_iter)\n\n info = {'train_imgs': [raw, target, pred]}\n for tag, vols in info.items ():\n for i_img in range (len (vols[0])):\n raw, target, pred = vols[0][i_img], vols[1][i_img], vols[2][i_img]\n raw = (raw * 255).astype (np.uint8)\n target = (target * 255).astype (np.uint8)\n pred = (pred * 255).astype (np.uint8)\n\n z_range, y_range, x_range, nchannel = raw.shape\n z, y, x = z_range // 2, y_range // 2, x_range // 2\n # print (vol.shape)\n yx_raw = raw [z,:,:]\n zx_raw = raw [:,y,:]\n yx_lbl = target [z,:,:]\n zx_lbl = target [:,y,:]\n yx_pre = pred [z,:,:]\n zx_pre = pred [:,y,:]\n yx_log_img = np.concatenate ([yx_raw, yx_lbl, yx_pre], 0)\n zx_log_img = np.concatenate ([zx_raw, zx_lbl, zx_pre], 0)\n log_img = np.concatenate ([yx_log_img, zx_log_img], 1)\n log_img = np.expand_dims (np.repeat (log_img, 3, -1), 0)\n logger.image_summary (tag + '_' + str (i_img), log_img, i_iter)\n\n\n i_iter += 1\n\n pbar.close ()\n time.sleep (1.0)\n pbar.write (s ='ipoc ' + str (i_ipoc) + ' iter ' + str (i_iter) + ' loss ' + str (ipoc_loss))\n\n if 
i_ipoc % SAVE_PERIOD == 0:\n # tqdm.write ('Checkpoint saved')\n save_checkpoint ({\n 'i_iter': i_iter,\n 'state_dict': model.state_dict (),\n 'optimizer': optimizer.state_dict ()\n }, CHECKPOINT_SAVE_PATH + 'checkpoint_' + str (i_iter) + '.pth.tar')\n # pbar ('\\nipoc ' + str (i_ipoc) + ' iter ' + str (i_iter) + ' loss ', str (ipoc_loss))\n\ndef get_data ():\n base_path = '../DATA/'\n train_path = natsorted (glob.glob(base_path + 'trainA/*.tif'))\n train_label_path = natsorted (glob.glob(base_path + 'trainB/*.tif'))\n X_train = read_im (train_path, downsample=1)\n y_train = read_im (train_label_path, downsample=1)\n\n return X_train [0], y_train[0]\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu', help='comma seperated list of GPU(s) to use.')\n parser.add_argument('--load', help='load model')\n \n args = parser.parse_args()\n checkpoint_path = None\n\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n\n if args.gpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n if args.load:\n checkpoint_path = args.load\n\n print ('Using GPU', os.environ['CUDA_VISIBLE_DEVICES'])\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n \n # Setup dataflow\n X_train, y_train = get_data ()\n zebrafish_data = ZebrDataset ('train', X_train, y_train, size=INPUT_SHAPE, device=device)\n train_data = DataLoader (zebrafish_data, batch_size=BATCH_SIZE, shuffle=True, num_workers=4)\n\n # Setup model\n model = Unet (IN_CH, FEATURES, OUT_CH).to (device)\n optimizer = optim.Adam (model.parameters (), lr=1e-4)\n loss_func = nn.BCELoss ()\n lr_scheduler = optim.lr_scheduler.StepLR (optimizer, step_size=100, gamma=0.999)\n i_iter = 0\n\n # Load checkpoint\n if checkpoint_path is not None:\n checkpoint = torch.load (checkpoint_path)\n model.load_state_dict (checkpoint['state_dict'])\n i_iter = checkpoint['i_iter']\n optimizer.load_state_dict (checkpoint['optimizer'])\n\n # Train model\n train (train_data, 10000000, loss_func, optimizer, lr_scheduler, i_iter=i_iter)\n\n ","sub_path":"log_seg/seg_net/train_unet.py","file_name":"train_unet.py","file_ext":"py","file_size_in_byte":6532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"468180422","text":"from django.urls import path\nfrom .views import statistics, contributor, donate, faq, profile\n\nurlpatterns = [\n\tpath('treasuryStats/', statistics.treasuryStats, name=\"treasuryStats\"),\n\tpath('govtStats/', statistics.govtStats, name=\"govtStats\"),\n\tpath('contributors/', contributor.contributors, name=\"contributors\"),\n\tpath('donates/', donate.donates, name=\"donates\"),\n\tpath('faqs/', faq.faqs, name=\"faqs\"),\n\tpath('profile/', profile.profiles, name=\"profiles\")\n]","sub_path":"v2/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"44854412","text":"# !-*- encoding=utf-8 -*-\n\"\"\"\n日志配置模块\n\nspider_logging.py create by v-zhidu\n\"\"\"\n\nimport logging\n\n\nclass SpiderLogging(object):\n \"\"\"\n 日志配置类\n\n spider_logging.py create by v-zhidu\n \"\"\"\n\n def __init__(self, name):\n self._logger = logging.getLogger(name)\n self.configure_logging()\n\n @property\n def logger(self):\n \"\"\"\n 返回logger实例\n \"\"\"\n return self._logger\n\n def configure_logging(self):\n \"\"\"\n 配置日志的具体方法\n \"\"\"\n self._logger.setLevel(logging.INFO)\n self.configure_console_handler()\n self.configure_file_handler()\n\n def configure_console_handler(self):\n \"\"\"\n 配置控制台handler\n \"\"\"\n # 设置样式\n formatter = logging.Formatter(\n '%(asctime)s %(filename)s[line:%(lineno)d] %(process)d %(thread)d - %(levelname)s - %(message)s')\n\n # 控制台handler\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.DEBUG)\n console_handler.setFormatter(formatter)\n\n # 添加handler\n self._logger.addHandler(console_handler)\n\n def configure_file_handler(self):\n \"\"\"\n 配置文件handler\n \"\"\"\n # 设置样式\n formatter = logging.Formatter(\n '%(asctime)s %(filename)s[line:%(lineno)d] %(process)d %(thread)d - %(levelname)s - %(message)s')\n\n # 控制台handler\n log_folder = './log/'\n logfile = 'spider.log'\n # 检查文件夹是否存在\n import os\n\n if not os.path.exists(log_folder):\n os.mkdir(log_folder)\n\n file_handler = logging.FileHandler(\n os.path.join(log_folder, logfile), mode='w')\n file_handler.setLevel(logging.DEBUG)\n file_handler.setFormatter(formatter)\n\n # 添加handler\n self._logger.addHandler(file_handler)\n\n\nif __name__ == '__main__':\n logger = SpiderLogging('test').logger\n logger.info('this is a info messages.')\n logger.debug('this is a info messages.')\n","sub_path":"spider_logging.py","file_name":"spider_logging.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"454473372","text":"n = int(input('enter the number of lines of fibonacci triangle: '))\r\na = 0\r\nb = 1\r\nprint(1, '\\n')\r\nfor i in range(2, n+1):\r\n for j in range(i):\r\n m = a + b\r\n print(m, end='\\t')\r\n a = b\r\n b = m\r\n print('\\n')\r\n\r\n","sub_path":"Day2/day2 task1.py","file_name":"day2 task1.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"214489969","text":"#! -*- coding: utf-8 -*-\nimport sys\nimport sqlite3\n\nphrase = raw_input(\"Enter a phrase: \")\n\ndef mark_terms(definition, words):\n for word in words:\n count = definition.count(word)\n ind = -1\n chars = '.,?!->< )(;:'\n if count:\n for i in xrange(count):\n tmp_definition = \"\"\n ind = definition.find(word, ind+18+1+len(word))\n if (definition[ind-1] in chars) and (definition[ind+len(word)] in chars):\n tmp_definition += definition[:ind]\n tmp_definition += ''\n tmp_definition += word\n tmp_definition += ''\n tmp_definition += definition[ind+len(word):]\n definition = tmp_definition[:]\n\n return definition\n\n\ndef tmp(terms_list, phrase_list):\n terms_dict ={}\n for i in terms_list:\n tmp_list = []\n term = i.split()\n for word in phrase_list:\n if word not in term:\n tmp_list.append(word)\n terms_dict[i] = tmp_list\n\n return terms_dict\n\ndef create_html(to_html, phrase_dict, terms_list):\n \"\"\"\n Generate html file of term's definitions from db.\n \"\"\"\n f = open('../tmp.html', 'w')\n html = \"\"\"\n \n \n
\n \n
\n
\n
\n \"\"\" % (phrase)\n\n for term in terms_list:\n html += '- '\n for i in xrange(len(to_html[term])):\n i += 1\n if i == 1:\n link_text = term\n else:\n link_text = str(i)\n if (i == 1) and (len(to_html[term]) > 1):\n link_text += ' 1 '\n html += ' %s ' % (term, i, link_text)\n html += '
'\n\n\n html += \"\"\"\n
\n
\n\n
\n \"\"\"\n start_div = '
'\n for term in terms_list:\n count = 0\n for i in to_html[term]:\n count += 1\n marked_definition = mark_terms(i, phrase_dict[term])\n div = ''\n div = '
' % (term, count) + start_div + '
' + term + '
' + marked_definition + end_div\n html += '\\n' + div\n html += \"\"\"\n
\n
\n\n \n \n \n \n \n \n \"\"\"\n\n # Writing into html file.\n print >> f, html.encode('utf-8')\n\n f.close()\n\n\ndef phrase_determ(phrase):\n \"\"\"\n Creates all possible values of five adjacent words.\n \"\"\"\n new_phrase = ''\n\n # Removes useless characters and leads string to lowercase.\n # Converts the string to the list.\n for i in phrase:\n if i not in '.,':\n new_phrase += i\n new_phrase = new_phrase[:].lower().split()\n\n all_variations = []\n\n for i in xrange(len(new_phrase)): \n indexs = []\n indexs.append(i)\n\n # These two loops looking for coincidence of values in the database for\n # all possible combinations of five adjacent words.\n for j in xrange(1,5):\n if i+j < len(new_phrase):\n indexs.append(i+j)\n\n for q in xrange(2**(len(indexs)-1), 2**(len(indexs))):\n bin_str = bin(q)[2:]\n count = 0\n variation = []\n for cha in bin_str:\n if cha == '1':\n index_of_set = count\n variation.append(new_phrase[indexs[index_of_set]])\n count += 1\n\n # List of all possible values.\n all_variations.append(variation)\n\n # Creates 'set' to remove repetitive combinations.\n all_terms_var = set()\n for variation in all_variations:\n term = ''\n for word in variation:\n term += word + ' '\n all_terms_var.add(term[:-1])\n\n return all_terms_var, new_phrase\n\n\ndef get_values(terms_set):\n \"\"\"\n Get term's values and theirs definitions from database.\n \"\"\"\n connect = sqlite3.connect(\"../dict.db\")\n\n to_html = {}\n terms_list = []\n\n for term in terms_set:\n cursor = connect.cursor()\n try:\n azaza = cursor.execute(\"SELECT word, descr FROM dictionary WHERE word = \\\"%s\\\" \" % term)\n for word, descr in azaza:\n if word not in to_html:\n to_html[word] = []\n to_html[word].append(descr)\n if word not in terms_list:\n terms_list.append(word)\n except sqlite3.OperationalError:\n pass\n\n terms_list.sort()\n # Returns list something like [[term, definition],...]\n return to_html, terms_list\n\nterms_variety_set, phrase_list = phrase_determ(phrase)\n\nto_html, terms_list = get_values(terms_variety_set)\n\nterms_dict = tmp(terms_list, phrase_list)\n\ncreate_html(to_html, terms_dict, terms_list)\n\n","sub_path":"generate_html.py","file_name":"generate_html.py","file_ext":"py","file_size_in_byte":6143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"218534575","text":"import os\nimport shutil\nimport re\nimport numpy as np\nimport torch\nfrom operator import itemgetter\nimport sys\n\nBEST_OF_THE_BEST = int(sys.argv[1])\nsolutions = []\n\nfor solution_file_name in os.listdir('./elites/'):\n solution_score = re.split(\"[. _]\", solution_file_name)[1]\n solutions.append((solution_file_name, int(solution_score)))\n\nsorted_member_performances = sorted(solutions,\n key=itemgetter(1),\n reverse=True)\naccepted = [x[0] for x in sorted_member_performances[0:BEST_OF_THE_BEST]]\n\nbest_score_in_generation = sorted_member_performances[0][1]\nbest_solution_in_generation = sorted_member_performances[0][0]\n\nif len(list(os.listdir('./best/'))) == 0:\n shutil.copyfile('./elites/' + best_solution_in_generation, './best/' + best_solution_in_generation)\nelse:\n for best_file_name in os.listdir('./best/'):\n best_score = re.split(\"[. _]\", best_file_name)[1]\n print(best_score)\n print(best_score_in_generation)\n if int(best_score_in_generation) > int(best_score):\n os.remove('./best/' + best_file_name)\n shutil.copyfile('./elites/' + best_solution_in_generation, './best/' + best_solution_in_generation)\n\nfor x in sorted_member_performances:\n if x[0] not in accepted:\n os.remove('./elites/' + x[0])\n","sub_path":"keep_best_elites.py","file_name":"keep_best_elites.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"168558686","text":"\"\"\"\nDefine base Program class.\n\nNotes\n-----\nRuning ``from reapy.tools import Program`` only imports this\n``Program`` class if called from inside REAPER. If not, then the\nsubclass ``reapy.tools.dist_program.Program``, which overrides\n``Program.run``, is imported.\n\"\"\"\n\nimport reapy\nfrom reapy import reascript_api as RPR\n\n\nclass Program:\n\n def __init__(self, code, *output):\n \"\"\"\n Build program.\n\n Parameters\n ----------\n code : str\n Code to execute. Note that if all lines except the empty first ones\n have constant indentation, this indentation is removed (allows for\n docstring code).\n output : iterable of str\n Variable names for which values at the end of the program are\n returned after execution.\n \"\"\"\n self._code = self.parse_code(code)\n self._output = tuple(output)\n\n def to_dict(self):\n \"\"\"\n Return dict representation of program.\n\n Returns\n -------\n rep : dict\n dict representation of program. A new program with same state can\n be created from `rep` with `Program(**rep)`.\n \"\"\"\n return (self._code,) + self._output\n\n def parse_code(self, code):\n \"\"\"\n Return code with correct indentation.\n\n Parameters\n ----------\n code : str\n Code to be parsed.\n\n Returns\n -------\n code : str\n Parsed code.\n \"\"\"\n code = code.replace(\"\\t\", \" \"*4)\n lines = code.split(\"\\n\")\n while lines[0] == \"\":\n lines.pop(0)\n indentation = len(lines[0]) - len(lines[0].lstrip(\" \"))\n lines = [line[indentation:] for line in lines]\n code = \"\\n\".join(lines)\n return code\n\n def run(self, **input):\n \"\"\"\n Run program and return output.\n\n Parameters\n ----------\n input : dict\n Dictionary with variable names as keys variables values as values.\n Passed as input to the program when running.\n\n Returns\n -------\n output : tuple\n Output values.\n \"\"\"\n input.update({\"RPR\": RPR, \"reapy\": reapy})\n exec(self._code, input)\n output = tuple(input[o] for o in self._output)\n return output\n","sub_path":"reapy/tools/program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":2321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"109622428","text":"from scrapy.contrib.spiders.crawl import CrawlSpider, Rule\nfrom scrapy.http.request import Request\nfrom scrapy.selector.lxmlsel import HtmlXPathSelector\nfrom link2link.items import Link2LinkItem\nfrom scrapy import cmdline\n\n__author__ = 'Gaurang_Shah1'\n\nclass FetchDetails(CrawlSpider):\n name = \"link2links\"\n allowed_domains = [\"link2linkco.com\"]\n start_urls = [\"http://link2linkco.com/OurBrands.html\"]\n\n\n brand=[]\n products_category=[\"N/A\"]\n product_name=[]\n product_description=\"\"\n product_graphic_name=\"\"\n product_graphic_directory=\"\"\n specification=\"\"\n guaranteed_analysis=\"\"\n\n def get_url(self,string):\n \"\"\"Return complete url\"\"\"\n return \"http://link2linkco.com/\" + string\n\n\n def parse(self, response):\n hxs = HtmlXPathSelector(response)\n brands = hxs.select(\"//div[@id='contentFull']/div/p/a/@href\")\n # self.item = Link2LinkItem()\n item = Link2LinkItem()\n for brand in brands:\n brand_page = brand.extract()\n request = Request(self.get_url(brand_page), callback=self.parse_brands,meta={'item':item})\n yield request\n\n\n def parse_brands(self, response):\n\n hxs = HtmlXPathSelector(response)\n brands = hxs.select(\"//div[@id='contentFull']/fieldset[2]/div/p/a/@href\")\n # for brand in brands:\n item = Link2LinkItem(response.meta['item'])\n products_category = hxs.select(\"//*[@id='contentFull']/fieldset[2]/div/p[2]/a/text()\").extract()\n item['Brand'] = hxs.select(\"//*[@id='contentFull']/h1/text()\").extract()\n if \"Products\" in hxs.select(\"//*[@id='contentFull']/fieldset[2]/legend/text()\").extract()[0]:\n #Catagory exsist, i.e. Dog, Cat\n all_catagories_links = hxs.select(\"//*[@id='contentFull']/fieldset[2]/div/p[2]/a/@href\").extract()\n index=0\n # c_list=[]\n for product in products_category:\n item = Link2LinkItem(response.meta['item'])\n item['Brand'] = hxs.select(\"//*[@id='contentFull']/h1/text()\").extract()\n catatory_link = all_catagories_links[index]\n item['Products_Category'] = product\n index = index + 1\n # yield item\n yield Request(self.get_url(catatory_link), callback=self.parse_cats, meta={'item': item})\n\n else:\n\n #direct product link is available.\n item['Brand'] = hxs.select(\"//*[@id='contentFull']/h1/text()\").extract()\n if \"Products_Category\" not in item:\n item['Products_Category'] = \"Not Available\"\n all_product_links = hxs.select(\"//div[@id='contentFull']/fieldset[2]/div/p/a/@href\").extract()\n\n for product_link in all_product_links:\n yield Request(self.get_url(product_link), callback=self.parse_products, meta={'item': item})\n\n def parse_cats(self, response):\n\n hxs = HtmlXPathSelector(response)\n item = Link2LinkItem(response.meta['item'])\n all_product_links = hxs.select(\"//div[@id='contentFull']/fieldset[2]/div/p/a/@href\").extract()\n\n for product_link in all_product_links:\n yield Request(self.get_url(product_link), callback=self.parse_products, meta={'item': item})\n\n\n def parse_products(self, response):\n hxs = HtmlXPathSelector(response)\n item = Link2LinkItem(response.meta[\"item\"])\n\n item['Specification'] = hxs.select(\"//div[@id='tab1']/p/text()\").extract()\n item['Product_Name'] = hxs.select(\"//*[@id='contentFull']/h1/text()\").extract()\n ga = hxs.select(\"//*[@id='tab2']/p/text()\").extract()\n if ga:\n item['Guaranteed_Analysis'] = ga\n else:\n item['Guaranteed_Analysis'] = \"Not Available\"\n\n\n item['Product_Description'] = hxs.select(\".//*[@id='contentFull']/p/text()\").extract()\n yield item\n\n\n # def get_url(self,string):\n # 
\"\"\"Return complete url\"\"\"\n # return \"http://link2linkco.com/\" + string\n #\n #\n # def parse(self, response):\n # #home page\n #\n #\n # hxs = HtmlXPathSelector(response)\n # brands = hxs.select(\"//div[@id='contentFull']/div/p/a/@href\")\n # # self.item = Link2LinkItem()\n # for brand in brands:\n # item = Link2LinkItem()\n # response.meta[\"item\"] = item\n # brand_page = brand.extract()\n # # print self.complete_url(brand_page)\n # yield Request(self.get_url(brand_page), callback=self.parse_brands)\n #\n #\n #\n #\n # def parse_brands(self, response):\n # hxs = HtmlXPathSelector(response)\n # brand_name = hxs.select(\"//*[@id='contentFull']/h1/text()\").extract()\n #\n #\n # response.meta['item']\n #\n # brands = hxs.select(\"//div[@id='contentFull']/fieldset[2]/div/p/a/@href\")\n # for brand in brands:\n # item = Link2LinkItem(response.meta[\"item\"])\n # item['Brand'] = brand_name\n # brand_link = brand.extract()\n # if \"Products\" in hxs.select(\"(//legend)[2]/text()\").extract()[0]:\n # yield Request(self.get_url(brand_link), callback=self.parse_catatories)\n # yield Request(self.get_url(brand_link), callback=self.parse_products)\n #\n # def parse_catatories(self, response):\n # hxs = HtmlXPathSelector(response)\n # catatories = hxs.select(\"//*[@id='contentFull']/fieldset[2]/div/p[2]/a/@href\")\n # products_category = hxs.select(\"(//legend)[2]/text()\").extract()\n # item = Link2LinkItem(response.meta[\"item\"])\n # item['Products_Category'] = products_category\n #\n # for catagory in catatories:\n # yield Request(self.get_url(catagory.extract()), callback=self.parse_brands)\n #\n #\n #\n # def parse_products(self, response):\n # print self.hashmap\n # hxs = HtmlXPathSelector(response)\n # item = Link2LinkItem(response.meta[\"item\"])\n # name = hxs.select(\".//*[@id='contentFull']/h1/text()\").extract()\n #\n # # print name\n #\n # # self.item['Brand'] = self.brand\n # # self.item['Products_Category'] = self.products_category\n # item['Product_Name'] = name\n #\n # yield item\n #\n #\n #\n # rules = (Rule(SgmlLinkExtractor(allow=(),\n # restrict_xpaths=(\"//div[@id='contentFull']/div/p/a\",)\n # ),\n # follow = True,\n # callback = \"get_brand_details\"),\n # Rule(SgmlLinkExtractor(allow=(),\n # restrict_xpaths=(\"//div[@id='contentFull']/fieldset[2]/div/p/a\",)\n # ),\n # follow = True,\n # callback = \"parse_detail\"),\n # )\n #\n #\n # def get_brand_details(self, response):\n # print \"in get brand details\"\n # hxs = HtmlXPathSelector(response)\n # self.brand_name = hxs.select(\"//div[@id='contentFull']/h1/text()\").extract()\n # print self.brand_name\n #\n # def parse_detail(self, response):\n # hxs = HtmlXPathSelector(response)\n # specification = hxs.select(\"//div[@id='tab1']/p/text()\").extract()\n # catagory = hxs.select(\"//*[@id='contentFull']/fieldset[2]/div[1]/p[2]/a\")\n # self.item['Specification'] = specification\n # self.item['Brand'] = self.brand_name\n # return self.item\n\n\n\n\ncmdline.execute(\"scrapy crawl link2links\".split())","sub_path":"link2link/link2link/spiders/fetch_details.py","file_name":"fetch_details.py","file_ext":"py","file_size_in_byte":7513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"310608794","text":"import re\n\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render, get_object_or_404\nfrom django.utils import timezone\n\nfrom . import settings\nfrom .models import *\nfrom .resources.admin_strings import strings\n\nS = strings[settings.lang]\n\ndef _stripval(d):\n new = {}\n for k in d.keys():\n new[k] = d[k].strip()\n return new\n\ndef _get_global_context():\n categories = Category.objects.order_by('order')\n return {\n 'global_info': settings.global_info,\n 'categories': categories,\n 'template_base': settings.skinpath + '/template.html',\n 'string': S,\n }\n\n@staff_member_required\ndef index(request):\n context = _get_global_context()\n context.update({\n 'count': [\n len(Article.objects.all()),\n len(Category.objects.all()),\n len(Tag.objects.all())\n ]\n })\n return render(request, 'miniblog/admin/global.html', context)\n\n@staff_member_required\ndef article(request):\n context = _get_global_context()\n\n articles = list(Article.objects.all())\n if request.GET.get('sort_by'):\n sortby = request.GET['sort_by']\n if sortby in ['date_created', 'date_modified']:\n articles = Article.objects.order_by('-' + sortby)\n elif sortby == 'id':\n articles = Article.objects.order_by('id')\n elif sortby == 'category':\n articles.sort(key=lambda a: a.category.url_id if a.category else '')\n elif sortby == 'tagcount':\n articles.sort(key=lambda a: len(a.tags.all()))\n\n context['articles'] = articles\n return render(request, 'miniblog/admin/article.html', context)\n\n@staff_member_required\ndef category(request):\n context = _get_global_context()\n return render(request, 'miniblog/admin/category.html', context)\n\n@staff_member_required\ndef tag(request):\n context = _get_global_context()\n\n tags = list(Tag.objects.all())\n for i, t in enumerate(tags):\n if t.article_count() == 0:\n t.delete()\n del tags[i]\n\n tags = sorted(\n tags,\n key=lambda x: x.article_count(),\n reverse=True\n )\n context['tags'] = tags\n\n return render(request, 'miniblog/admin/tag.html', context)\n\n@staff_member_required\ndef new_article(request, **kw):\n context = _get_global_context()\n\n modify = True if 'article' in kw.keys() else False\n\n if modify:\n article = Article.objects.get(pk=kw['article'])\n\n def setheader(m):\n if m:\n context.update({\n 'header_text': S['editor']['header_modify'],\n 'guide_text': S['editor']['guide_modify'],\n 'article': article,\n 'article_tag_text': ','.join([t.name for t in article.tags.all()])\n })\n else:\n context.update({\n 'header_text': S['editor']['header_new'],\n 'guide_text': S['editor']['guide_new']\n })\n\n if request.method == 'GET':\n context['redirect'] = request.GET.get('redirect')\\\n if 'redirect' in request.GET else ''\n setheader(modify)\n return render(request, 'miniblog/admin/new_article.html', context)\n\n elif request.method == 'POST':\n val = _stripval(request.POST)\n\n def error(message):\n context.update({\n 'error_message': message,\n 'prev_title': val['title'],\n 'prev_text': val['text'],\n })\n setheader(modify)\n return render(request, 'miniblog/admin/new_article.html', context)\n\n # 필수 필드가 비어있을 경우 오류 메시지와 함께 양식을 다시 표시함\n if val['title'] == '' or val['text'] == '':\n return error(S['editor']['error_empty'])\n\n pattern = re.compile(r'[/]')\n if bool(pattern.search(val['tags'])):\n return error(S['editor']['error_invalid_tag'])\n\n now = timezone.now()\n if not 
modify:\n article = Article()\n article.date_created = now\n article.date_modified = now\n article.title = val['title']\n article.text = val['text']\n article.published = True if val['published'] == 'yes' else False\n article.save()\n\n if val['category'] == '':\n article.category = None\n else:\n category = Category.objects.get(url_id=val['category'])\n article.category = category\n\n tag_text = val['tags']\n tags = [t for t in re.split(r',\\s*', tag_text) if len(t) != 0]\n tag_list = []\n for t in tags:\n try:\n tag_list.append(Tag.objects.get(name=t))\n except Tag.DoesNotExist:\n newtag = Tag(name=t, date_created=now)\n newtag.save()\n tag_list.append(newtag)\n article.tags = tag_list\n\n article.save()\n\n if val['redirect']:\n return HttpResponseRedirect(val['redirect'])\n else:\n return HttpResponseRedirect(reverse('miniblog:admin:article'))\n\n@staff_member_required\ndef delete_article(request, **kw):\n if request.method == 'POST':\n article = get_object_or_404(Article, pk=request.POST['id'])\n article.delete()\n return HttpResponseRedirect(reverse('miniblog:admin:article'))\n\n else:\n return HttpResponse(status=405)\n\n@staff_member_required\ndef category_details(request, **kw):\n context = _get_global_context()\n\n modify = True if 'cat_id' in kw.keys() else False\n\n if modify:\n category = get_object_or_404(Category, url_id=kw['cat_id'])\n articles = category.article_set.order_by('-date_created')\n\n if request.method == 'GET':\n if modify:\n context.update({\n 'category': category,\n 'articles': articles,\n 'header_text': S['category']['details']['header_modify'].format(category.name)\n })\n else:\n context['header_text'] = S['category']['details']['header_create']\n return render(request, 'miniblog/admin/category_details.html', context)\n\n elif request.method == 'POST':\n response = ''\n val = _stripval(request.POST)\n\n def error(message):\n context.update({\n 'error_message': message,\n 'prev_urlid': val['url_id'] if not modify else '',\n 'prev_name': val['name'],\n 'prev_description': val['description'],\n })\n if modify:\n context.update({\n 'category': category,\n 'articles': articles,\n })\n return render(request, 'miniblog/admin/category_details.html', context)\n\n # 필수 필드가 비어있을 경우 오류 메시지와 함께 양식을 다시 표시함\n if (not modify and val['url_id'] == '')\\\n or val['name'] == '':\n return error(S['category']['details']['error_empty'])\n\n # url_id에 부적절한 값이 있을 경우 오류 표시\n pattern = re.compile(r'[^_0-9A-Za-z]')\n if not modify and\\\n (bool(pattern.search(val['url_id']))):\n return error(S['category']['details']['error_invalid'])\n\n if not modify:\n category = Category()\n category.url_id = val['url_id']\n category.name = val['name']\n category.description = val['description']\n\n if len(Category.objects.all()) == 0:\n category.order = 0\n else:\n category.order = Category.objects.order_by('-order')[0].order + 1\n\n category.save()\n\n return HttpResponseRedirect(reverse('miniblog:admin:category'))\n\n else:\n return HttpResponse(status=405)\n\n@staff_member_required\ndef reorder_category(request, **kw):\n if request.method == 'POST':\n context = _get_global_context()\n def error(message):\n context.update({\n 'error_message': message\n })\n return render(request, 'miniblog/admin/category.html', context)\n order_s = request.POST['order']\n try:\n order = [int(x) for x in order_s.split(',')]\n except ValueError:\n return error(S['category']['reorder_error_invalid'])\n\n if len(order) != len(set(order)):\n return error(S['category']['reorder_error_duplicate'])\n\n categories = 
Category.objects.order_by('order')\n for c, o in zip(categories, order):\n c.order = o\n c.save()\n\n return HttpResponseRedirect(reverse('miniblog:admin:category'))\n\n else:\n return HttpResponse(status=405)\n\n@staff_member_required\ndef delete_category(request, **kw):\n if request.method == 'POST':\n category = get_object_or_404(Category, url_id=kw['cat_id'])\n category.delete()\n return HttpResponseRedirect(reverse('miniblog:admin:category'))\n\n else:\n return HttpResponse(status=405)\n\n@staff_member_required\ndef tag_details(request, **kw):\n context = _get_global_context()\n\n if request.method == 'GET':\n tag = Tag.objects.get(name=kw['tag'])\n articles = tag.article_set.order_by('-date_created')\n context.update({\n 'tag': tag,\n 'articles': articles,\n 'header_text': S['tag']['details']['header_modify'].format(tag.name)\n })\n return render(request, 'miniblog/admin/tag_details.html', context)\n\n elif request.method == 'POST':\n val = _stripval(request.POST)\n tag = Tag.objects.get(name=kw['tag'])\n tag.description = val['description']\n tag.save()\n\n return HttpResponseRedirect(reverse('miniblog:admin:tag'))\n\n else:\n return HttpResponse(status=405)\n","sub_path":"admin_views.py","file_name":"admin_views.py","file_ext":"py","file_size_in_byte":10002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
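One fix worth calling out for the `admin_views.py` record above: the `tag` view deletes `Tag` rows while enumerating the very list it is walking, so after each `del tags[i]` the next element slides into slot `i` and is skipped (and later indices no longer line up with the shrunken list). A corrected sketch, keeping the record's `Tag.article_count()` API and assuming the same pruning intent:

```python
def _prune_empty_tags(tags):
    # Iterate the original list but build a new one, so deletions never
    # disturb the sequence being walked.
    kept = []
    for t in tags:
        if t.article_count() == 0:
            t.delete()          # drop tags no article references
        else:
            kept.append(t)
    # Same ordering as the view: most-used tags first.
    return sorted(kept, key=lambda t: t.article_count(), reverse=True)
```

As an aside, the record imports `reverse` from `django.core.urlresolvers`, which was removed in Django 2.0 in favour of `django.urls`.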
+{"seq_id":"37835489","text":"\nimport cv2\nimport matplotlib.pyplot as plt\n\n\n# Load the three test exposures and convert them to grayscale.\nimage1 = cv2.imread('dark.jpg')\nimage2 = cv2.imread('mid.jpg')\nimage3 = cv2.imread('light.jpg')\n\ngray_image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)\ngray_image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)\ngray_image3 = cv2.cvtColor(image3, cv2.COLOR_BGR2GRAY)\n\n\ncv2.imshow(\"dark\", gray_image1)\ncv2.imshow(\"mid\", gray_image2)\ncv2.imshow(\"light\", gray_image3)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n# 256-bin intensity histogram for each grayscale image.\nhistogram1 = cv2.calcHist([gray_image1], [0], None, [256], [0, 256])\nhistogram2 = cv2.calcHist([gray_image2], [0], None, [256], [0, 256])\nhistogram3 = cv2.calcHist([gray_image3], [0], None, [256], [0, 256])\n\nplt.plot(histogram1, color='k')\nplt.show()\nplt.plot(histogram2, color='k')\nplt.show()\nplt.plot(histogram3, color='k')\nplt.show()\n","sub_path":"histograms/gray_hist.py","file_name":"gray_hist.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
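The `gray_hist.py` record plots each histogram in its own window. As a minimal sketch (assuming the same `dark.jpg`, `mid.jpg`, `light.jpg` inputs), overlaying the three curves on one set of labelled axes makes the exposure difference between the images much easier to compare:

```python
import cv2
import matplotlib.pyplot as plt

for name in ("dark", "mid", "light"):           # same file names as above
    gray = cv2.cvtColor(cv2.imread(name + ".jpg"), cv2.COLOR_BGR2GRAY)
    hist = cv2.calcHist([gray], [0], None, [256], [0, 256])
    plt.plot(hist, label=name)                  # one labelled curve per image
plt.xlabel("intensity")
plt.ylabel("pixel count")
plt.legend()
plt.show()
```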
+{"seq_id":"357802818","text":"\r\nfrom django.shortcuts import render_to_response, redirect\r\nfrom django.contrib import auth\r\nfrom themes.models import Theme\r\nfrom registration.models import UserProfile\r\nfrom forum.forms import UploadImageForm\r\n\r\n\r\ndef main_page(request):\r\n\tonline=UserProfile.objects.filter(userprofile_online=True)\r\n\tthemes = Theme.objects.all()\r\n\targs = {}\r\n\targs['username'] = auth.get_user(request).username\r\n\targs['all_themes']=themes[::-1]\r\n\targs['users_online']=online\r\n\treturn render_to_response('Main.html', args)\r\n\t\r\ndef profile(request):\r\n\tcurrent_user = auth.get_user(request)\r\n\tuserprofile=UserProfile.objects.get(userprofile_user=current_user)\r\n\t\r\n\targs = {}\r\n\targs['username'] = auth.get_user(request).username\r\n\targs['email'] = auth.get_user(request).email\r\n\targs['date_of_registration'] = userprofile.userprofile_regdate\r\n\targs['user_themes'] = Theme.objects.filter(theme_author=current_user)\r\n\targs['total_comments'] = userprofile.userprofile_counter\r\n\targs['avatar'] = userprofile.userprofile_avatar\r\n\t\r\n\treturn render_to_response('Profile.html', args)\r\n\r\ndef avatar_adding(request):\r\n\tform = UploadImageForm\r\n\targs={}\r\n\targs['form']=form\r\n\treturn render_to_response('Upload.html', args)\r\n\t\r\ndef add_avatar(request):\r\n\t\r\n\tif request.method=='POST':\r\n\t\tcurrent_user = auth.get_user(request)\r\n\t\tuserprofile=UserProfile.objects.get(userprofile_user=current_user)\t\t\r\n\t\tform = UploadImageForm(request.POST, request.FILES, instance=userprofile)\r\n\t\tif form.is_valid():\r\n\t\t\tform.save()\r\n\t\t\t\r\n\treturn redirect('/forum/profile/')\r\n\r\n\r\n\t\r\n\t\r\n\t","sub_path":"forum/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
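Two points about the `forum/views.py` record above: `render_to_response` was deprecated in Django 2.0 and removed in 3.0, and none of the views guard against anonymous users (for `profile`, `UserProfile.objects.get` would then fail). A hedged sketch of the modern equivalent, assuming the same `UserProfile` model:

```python
from django.contrib.auth.decorators import login_required
from django.shortcuts import render


@login_required                 # anonymous visitors get redirected to login
def profile(request):
    userprofile = UserProfile.objects.get(userprofile_user=request.user)
    args = {
        'username': request.user.username,
        'email': request.user.email,
        'avatar': userprofile.userprofile_avatar,
    }
    # render() attaches the request to the template context, which the
    # removed render_to_response() never did.
    return render(request, 'Profile.html', args)
```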
+{"seq_id":"319233949","text":"from tkinter import *\nimport modele\nDIM = 30\nCOULEURS = [\"red\",\"blue\",\"green\",\"yellow\",\"orange\",\"purple\",\"pink\",\n \"dark grey\",\"black\"]\nx1= modele.ModeleTetris()\nclass VueTetris:\n\n def __init__(self, ModeleTetris):\n self.__modele = ModeleTetris\n self.__fenetre= Tk()\n self.__fenetre.title(\"Tetris\")\n self.__can_terrain = Canvas(self.__fenetre, width =self.__modele.get_largeur()*DIM, height =self.__modele.get_hauteur()*DIM)\n self.__can_terrain.pack(side ='left')\n frame = Frame(self.__fenetre)\n\n self.__bundleScore = StringVar()\n self.__bundleScore=\"Score : \"+str(self.__modele.get_score())\n self.__lbl_score= Label(frame, textvariable=self.__bundleScore)\n self.__lbl_score.pack()\n btn_quitter = Button(frame, text=\"quitter\" , command = self.__fenetre.destroy)\n btn_quitter.pack()\n frame.pack(side ='right')\n self.__les_cases = []\n \n for i in range(self.__modele.get_hauteur()):\n liste =[]\n for j in range( self.__modele.get_largeur()):\n liste.append(self.__can_terrain.create_rectangle(j*DIM,i*DIM,DIM*self.__modele.get_largeur(),DIM*self.__modele.get_hauteur(),outline =\"grey\", fill= COULEURS[self.__modele.get_valeur(i,j)]))\n self.__les_cases.append(liste)\n \n \n def fenetre(self):\n return self.__fenetre\n\n def dessine_case(self,i,j,coul):\n self.__can_terrain.itemconfigure(self.__les_cases[i][j],fill = COULEURS[coul])\n\n def dessine_terrain(self):\n for i in range(0,self.__modele.get_hauteur()):\n ligne =[]\n for j in range(0,self.__modele.get_largeur()):\n self.dessine_case(i,j,self.__modele.get_valeur(i,j))\n\n def dessine_forme(self, coords, couleur):\n for i in coords :\n self.dessine_case(i[1],i[0], couleur)\n\n\n \n","sub_path":"vue.py","file_name":"vue.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
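There is a drawing bug in the `vue.py` record: `create_rectangle` is called with `DIM * largeur` and `DIM * hauteur` as the bottom-right corner, so every cell is a rectangle stretching from its own top-left corner to the bottom-right of the whole canvas, with later cells painted over earlier ones. The cell was presumably meant to be a single `DIM` by `DIM` square:

```python
# Inside VueTetris.__init__, per grid cell (i, j):
liste.append(self.__can_terrain.create_rectangle(
    j * DIM, i * DIM,                # top-left corner of cell (i, j)
    (j + 1) * DIM, (i + 1) * DIM,    # bottom-right corner, one cell over
    outline="grey",
    fill=COULEURS[self.__modele.get_valeur(i, j)]))
```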
+{"seq_id":"538309384","text":"#!/usr/bin/env python3\n# coding:utf-8\n\nimport os\nimport sys\nimport json\nimport numpy as np\nfrom Levenshtein import *\n\nrecogFile = sys.argv[1]\nannotFile = sys.argv[2]\n#recogFile = \"coco_test.txt\"\n#annotFile = \"/data/coco-text/coco_annot.json\"\n\nannot = json.load(open(annotFile, 'r'))\ntotalLD = 0.0\ntotalLen = 0.0\naccs = []\n\nwith open(recogFile, 'r') as fin:\n for line in fin:\n recogText = \"\"\n try:\n imgFile, recogText = line.strip().split(\"\\t\")\n except:\n imgFile = line.strip()\n imgFile = os.path.basename(imgFile)\n imgAnnots = annot[imgFile]['annotations']\n annotString = ''\n for imgAnnot in imgAnnots:\n annotString += imgAnnot['utf8_string'].lower()\n totalLen += (len(imgAnnot['utf8_string']))\n ld = distance(recogText, annotString)\n annotLen = len(annotString)\n acc = float(annotLen - ld) / annotLen\n totalLD += ld\n accs.append(acc)\n\naveAccRate = np.mean(accs)\ntotalAccRate = (totalLen - totalLD) / totalLen\nprint(\"file:{}\".format(recogFile))\nprint(\"average accuracy:{}\".format(aveAccRate))\nprint(\"total accuracy:{}\".format(totalAccRate))\n","sub_path":"generate_tfrecord/accuracy.py","file_name":"accuracy.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
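The `accuracy.py` record divides by `annotLen` for every image, so an image whose ground-truth `annotations` list is empty gives `annotLen == 0` and raises `ZeroDivisionError` (the final `totalAccRate` division has the same hazard for an empty annotation set). A small guard, using the record's own variable names:

```python
annotLen = len(annotString)
if annotLen == 0:
    continue   # skip images with empty ground truth, or tally them separately
ld = distance(recogText, annotString)
acc = float(annotLen - ld) / annotLen
```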
+{"seq_id":"364247396","text":"import torch\nimport torch.utils.data as data\nimport os\nimport cv2\nfrom google.colab.patches import cv2_imshow\nimport matplotlib.pyplot as plt\nfrom torchvision import transforms\nimport torch.optim as optim\nimport tqdm\nfrom PIL import Image\nimport numpy as np\n\nimport torch.nn as nn \nfrom torch.nn.functional import mse_loss as mse \n\n# Change to your data root directory\nroot_path = \"/content/\"\n# Depend on runtime setting\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") \n\ntrain_dataset = ColorHintDataset(root_path, 128)\ntrain_dataset.set_mode(\"training\")\n\nval_dataset = ColorHintDataset(root_path, 128)\nval_dataset.set_mode(\"validation\")\n\ntrain_dataloader = data.DataLoader(train_dataset, batch_size=4, shuffle=True)\nval_dataloader = data.DataLoader(val_dataset, batch_size=4, shuffle=True)\n\n# ================== define helper function ==================\n\nclass AverageMeter(object):\n def __init__(self):\n self.reset()\n def reset(self):\n self.val, self.avg, self.sum, self.count = 0, 0, 0, 0\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n# ================== define train and validation ==================\n\ndef train(model, train_dataloader, optimizer, criterion, epoch):\n print('[Training] epoch {} '.format(epoch))\n model.train()\n losses = AverageMeter()\n \n for i, data in enumerate(train_dataloader):\n \n # if use_cuda:\n l = data[\"l\"].cuda()\n ab = data[\"ab\"].cuda()\n hint = data[\"hint\"].cuda()\n mask = data[\"mask\"].cuda()\n \n # concat\n gt_image = torch.cat((l, ab), dim=1).cuda()\n #print('\\n===== img size =====\\n', gt_image.shape)\n hint_image = torch.cat((l, hint, mask), dim=1).cuda()\n #print('===== hint size =====\\n', hint_image.shape)\n\n # run forward\n output_ab = model(hint_image)\n loss = criterion(output_ab, gt_image)\n losses.update(loss.item(), hint_image.size(0))\n\n # compute gradient and optimize\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if i%100==0:\n print('Train Epoch : [{}] [{} / {}]\\tLoss{loss.val:.4f}'.format(epoch, i, len(train_dataloader),loss=losses))\n\n\ndef validation(model, train_dataloader, criterion, epoch):\n model.eval()\n losses = AverageMeter()\n \n for i, data in enumerate(val_dataloader):\n \n # if use_cuda:\n l = data[\"l\"].cuda()\n ab = data[\"ab\"].cuda()\n hint = data[\"hint\"].cuda()\n mask = data[\"mask\"].cuda()\n\n # concat\n gt_image = torch.cat((l, ab), dim=1).cuda()\n #print('\\n===== img size =====\\n', gt_image.shape)\n hint_image = torch.cat((l, hint, mask), dim=1).cuda()\n #print('===== hint size =====\\n', hint_image.shape)\n\n # run model and store loss\n output_ab = model(hint_image)\n loss = criterion(output_ab, gt_image)\n losses.update(loss.item(), hint_image.size(0))\n \n gt_np = tensor2im(gt_image)\n #print('\\n===== gt size =====\\n', gt_np.shape)\n hint_np = tensor2im(output_ab)\n #print('===== hint size =====\\n', hint_np.shape)\n\n gt_bgr = cv2.cvtColor(gt_np, cv2.COLOR_LAB2BGR)\n hint_bgr = cv2.cvtColor(hint_np, cv2.COLOR_LAB2BGR)\n \n os.makedirs('/content/predictions',exist_ok=True)\n cv2.imwrite('/content/predictions/pred_'+str(i)+'.jpg',hint_bgr)\n\n os.makedirs('/content/gt',exist_ok=True)\n cv2.imwrite('/content/gt/gt_'+str(i)+'.jpg',gt_bgr)\n\n if i%100==0:\n print('Validation Epoch : [{} / {}]\\tLoss{loss.val:.4f}'.format(i, len(val_dataloader),loss=losses))\n\n cv2_imshow(gt_bgr)\n cv2_imshow(hint_bgr)\n \n return 
losses.avg\n\n# ================== define psnr and psnr_loss ==================\n\ndef psnr(input: torch.Tensor, target: torch.Tensor, max_val: float) -> torch.Tensor:\n if not isinstance(input, torch.Tensor):\n raise TypeError(f\"Expected torch.Tensor but got {type(target)}.\")\n\n if not isinstance(target, torch.Tensor):\n raise TypeError(f\"Expected torch.Tensor but got {type(input)}.\")\n\n if input.shape != target.shape:\n raise TypeError(f\"Expected tensors of equal shapes, but got {input.shape} and {target.shape}\")\n\n return 10. * torch.log10(max_val ** 2 / mse(input, target, reduction='mean'))\n\ndef psnr_loss(input: torch.Tensor, target: torch.Tensor, max_val: float) -> torch.Tensor:\n return -1. * psnr(input, target, max_val)\n\n# ================== class PSNRLoss ================== \n\nclass PSNRLoss(nn.Module):\n def __init__(self, max_val: float) -> None:\n super(PSNRLoss, self).__init__()\n self.max_val: float = max_val\n\n def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:\n return psnr_loss(input, target, self.max_val)\n\n# ====================================================\n\nmodel = UnetGenerator()\ncriterion = PSNRLoss(2.)\n# criterion = nn.BCELoss()\n# criterion = nn.MSELoss()\n# criterion = nn.BCEWithLogitsLoss()\n# criterion = nn.CrossEntropyLoss()\n\noptimizer = optim.Adam(model.parameters(), lr=0.00025) # 1e-2 # 0.0005 # 0.00025 # 0.0002\nepochs = 150 \nbest_losses = 10\n\nsave_path = './Result'\nos.makedirs(save_path, exist_ok=True)\noutput_path = os.path.join(save_path, 'validation_model.tar')\n\nmodel.cuda()\n\nfor epoch in range(epochs):\n train(model, train_dataloader, optimizer, criterion, epoch)\n with torch.no_grad():\n val_losses = validation(model, val_dataloader, criterion, epoch)\n\n if best_losses > val_losses:\n best_losses = val_losses\n torch.save(model.state_dict(), '/content/drive/MyDrive/Myungji/PSNR/PSNR-epoch-{}-losses-{:.5f}.pth'.format(epoch + 1, best_losses))\n \n","sub_path":"Train/PSNR.py","file_name":"PSNR.py","file_ext":"py","file_size_in_byte":5526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
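One bug to note in the `PSNR.py` record: `validation` declares a `train_dataloader` parameter but its loop reads the module-level `val_dataloader`, so the function only works by accident of globals and the argument passed in is ignored. A signature that matches how the function is actually called:

```python
def validation(model, val_dataloader, criterion, epoch):
    """Identical body to the record, but iterating the argument."""
    model.eval()
    losses = AverageMeter()
    for i, data in enumerate(val_dataloader):   # was: the global val_dataloader
        ...                                     # unchanged body from the record
    return losses.avg
```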
+{"seq_id":"381882106","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/8/13 11:00\n# @Author : Mo\n# @File : excercise_1_PENGHIAS.py\n\n\n\"\"\"\n1.写函数,用户传入修改的文件名,与要修改的内容,执行函数,\n完成批了修改操作\n\"\"\"\n\n\ndef file_amend(file_name, content):\n with open(file_name, 'w') as f:\n f.write(content)\n\n\n\"\"\"\n2.写函数,计算传入字符串中【数字】、【字母】、【空格】\n以及【其他】的个数\n\"\"\"\n\n\ndef symbol_number(content):\n digit_num = 0\n letter_num = 0\n space_num = 0\n rest_num = 0\n for i in content:\n if i.isdigit():\n digit_num += 1\n elif i.isalpha():\n letter_num += 1\n elif i.isspace():\n space_num += 1\n else:\n rest_num += 1\n print('digit_num=%d,letter_num=%d,space_num=%d,rest_num=%d' % (digit_num, letter_num, space_num, rest_num))\n\n\n\"\"\"\n3.写函数,判断用户传入的对象(字符串、列表、元组)长度是\n否大于5。\n\"\"\"\n\n\ndef len_judge(content):\n if len(content) > 5:\n return True\n else:\n return False\n\n\n\"\"\"\n4.写函数,检查传入列表的长度,如果大于2,那么仅保留前两个\n长度的内容,并将新内容返回给调用者。\n\"\"\"\n\n\ndef list_check(content):\n if len(content) > 2:\n content = content[:2]\n return content\n\n\n\n\"\"\"\n5.写函数,检查获取传入列表或元组对象的所有奇数位索引对应\n的元素,并将其作为新列表返回给调用者。\n\"\"\"\n\n\ndef odd_number(content):\n new_content = []\n for i in range(len(content)):\n if i % 2 != 0:\n new_content.append(content[i])\n return new_content\n\n\n\"\"\"\n6.写函数,检查字典的每一个value的长度,如果大于2,那么仅保\n留前两个长度的内容,并将新内容返回给调用者。\n\"\"\"\n\n\ndef dict_check(content):\n for key, value in content.items():\n if len(value) > 2:\n content[key] = value[:2]\n return content\ndict1 = {\"k1\": \"v1v1\", \"k2\": [11, 22, 33, 44]}\n","sub_path":"second_stage/chapter_6/practice.py","file_name":"practice.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
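Exercise 1 in the record above asks for a function that modifies a file, but the given `file_amend` opens the file with mode `'w'`, which truncates it, so the "modification" erases everything except the new content. A sketch closer to the likely intent (assuming "modify" means replacing occurrences of an old string with a new one; the `old`/`new` parameters are my addition):

```python
def file_amend(file_name, old, new):
    # Read the whole file, substitute, then write back; opening with 'w'
    # alone would have destroyed the original contents.
    with open(file_name, 'r', encoding='utf-8') as f:
        content = f.read()
    with open(file_name, 'w', encoding='utf-8') as f:
        f.write(content.replace(old, new))
```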
+{"seq_id":"397797106","text":"import numpy as np\n\nclass LogisticRegression:\n\n    def __init__(self, lr=0.00001, nr_iter=1000):\n        self.lr = lr\n        self.nr_iter = nr_iter\n        self.weight = None\n        self.bias = None\n\n    def fit(self, x, y):\n        n_samples, n_features = x.shape\n        self.weight = np.zeros(n_features)\n        self.bias = 0\n\n        for _ in range(self.nr_iter):\n            # linear model pushed through the sigmoid function\n            e_pow = np.dot(x, self.weight) + self.bias\n            y_pred = 1 / (1 + np.exp(-e_pow))\n\n            # gradients of the log-loss w.r.t. weights and bias\n            dw = (1 / n_samples) * np.dot(x.T, (y_pred - y))\n            db = (1 / n_samples) * np.sum(y_pred - y)\n\n            self.weight -= self.lr * dw\n            self.bias -= self.lr * db\n\n    def predict(self, x):\n        e_pow = np.dot(x, self.weight) + self.bias\n        y_pred = 1 / (1 + np.exp(-e_pow))\n        y_pred_cls = [1 if prob > 0.5 else 0 for prob in y_pred]\n        return y_pred_cls\n","sub_path":"Logistic Regression/log_reg.py","file_name":"log_reg.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
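A quick smoke test for the classifier above, on a toy linearly separable set (the two blobs and the larger learning rate are made up for illustration; the default `lr` is far too small for data at this scale):

```python
import numpy as np

rng = np.random.default_rng(0)
# Two 2-D Gaussian blobs, labelled 0 and 1.
x = np.vstack([rng.normal(0, 1, (50, 2)), rng.normal(4, 1, (50, 2))])
y = np.concatenate([np.zeros(50), np.ones(50)])

clf = LogisticRegression(lr=0.1, nr_iter=1000)
clf.fit(x, y)
pred = clf.predict(x)
print("training accuracy:", np.mean(np.array(pred) == y))
```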
+{"seq_id":"40805844","text":"import pybamm\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport itertools\n\n\nparameters = [\"Marquis2019\", \"Ecker2015\", \"Ramadass2004\", \"Chen2020\"]\n\nmodels = {\"SPM\": pybamm.lithium_ion.SPM(), \"DFN\": pybamm.lithium_ion.DFN()}\n\nabstols = [\n 0.0001,\n 1.0e-5,\n 1.0e-6,\n 1.0e-7,\n 1.0e-8,\n 1.0e-9,\n 1.0e-10,\n 1.0e-11,\n 1.0e-12,\n 1.0e-13,\n]\n\nsolvers = {\n \"IDAKLUSolver\": pybamm.IDAKLUSolver(),\n \"Casadi - safe\": pybamm.CasadiSolver(),\n \"Casadi - fast\": pybamm.CasadiSolver(mode=\"fast\"),\n}\n\n\nfig, axs = plt.subplots(len(solvers), len(models), figsize=(8, 10))\n\nfor ax, i, j in zip(\n axs.ravel(),\n itertools.product(solvers.values(), models.values()),\n itertools.product(solvers, models),\n):\n for params in parameters:\n time_points = []\n solver = i[0]\n\n model = i[1].new_copy()\n c_rate = 1\n tmax = 3500 / c_rate\n nb_points = 500\n t_eval = np.linspace(0, tmax, nb_points)\n geometry = model.default_geometry\n\n # load parameter values and process model and geometry\n param = pybamm.ParameterValues(params)\n param.process_model(model)\n param.process_geometry(geometry)\n\n # set mesh\n var_pts = {\n \"x_n\": 20,\n \"x_s\": 20,\n \"x_p\": 20,\n \"r_n\": 30,\n \"r_p\": 30,\n \"y\": 10,\n \"z\": 10,\n }\n mesh = pybamm.Mesh(geometry, model.default_submesh_types, var_pts)\n\n # discretise model\n disc = pybamm.Discretisation(mesh, model.default_spatial_methods)\n disc.process_model(model)\n\n for tol in abstols:\n solver.atol = tol\n solver.solve(model, t_eval=t_eval)\n time = 0\n runs = 20\n for k in range(0, runs):\n solution = solver.solve(model, t_eval=t_eval)\n time += solution.solve_time.value\n time = time / runs\n\n time_points.append(time)\n\n ax.set_xscale(\"log\")\n ax.set_yscale(\"log\")\n ax.set_xlabel(\"abstols\")\n ax.set_ylabel(\"time(s)\")\n ax.set_title(f\"{j[1]} with {j[0]}\")\n ax.plot(abstols, time_points)\n\nplt.tight_layout()\nplt.gca().legend(\n parameters,\n loc=\"lower right\",\n)\n\n\nplt.savefig(f\"benchmarks/benchmark_images/time_vs_abstols_{pybamm.__version__}.png\")\n\n\ncontent = f\"# PyBaMM {pybamm.__version__}\\n## Solve Time vs Abstols\\n
<img src='./benchmark_images/time_vs_abstols_{pybamm.__version__}.png'>
\\n\" # noqa\n\nwith open(\"./benchmarks/release_work_precision_sets.md\", \"r\") as original:\n data = original.read()\nwith open(\"./benchmarks/release_work_precision_sets.md\", \"w\") as modified:\n modified.write(f\"{content}\\n{data}\")\n","sub_path":"benchmarks/work_precision_sets/time_vs_abstols.py","file_name":"time_vs_abstols.py","file_ext":"py","file_size_in_byte":2728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
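The benchmark above hand-rolls its timing: one untimed `solver.solve` call to absorb setup cost, then the mean of 20 timed runs read from `solution.solve_time`. A reusable sketch of the same pattern with `time.perf_counter` (which times the whole call, Python overhead included, so its numbers are not directly comparable to `solve_time`):

```python
import time

def mean_runtime(fn, runs=20):
    fn()                                  # warm-up call, not timed
    start = time.perf_counter()
    for _ in range(runs):
        fn()
    return (time.perf_counter() - start) / runs

# usage against the record's objects (assuming solver, model, t_eval exist):
# avg = mean_runtime(lambda: solver.solve(model, t_eval=t_eval))
```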
+{"seq_id":"236896699","text":"from models import Model\nfrom bson import ObjectId\n\nModel = Model\n\n\nclass Reply(Model):\n @classmethod\n def valid_names(cls):\n names = super().valid_names()\n names = names + [\n ('content', str, ''),\n ('forum_id', str, 0),\n ('user_id', str, 0),\n ]\n return names\n\n def user(self):\n from .user import User\n u = User.find(self.user_id)\n return u\n\n @classmethod\n def find_replies(cls, **kwargs):\n name = cls.__name__\n kwargs['deleted'] = False\n if 'id' in kwargs:\n kwargs['_id'] = ObjectId(kwargs['id'])\n kwargs.pop('id')\n ds = cls.db[name].find(kwargs).sort([('created_time', 1)])\n l = [cls._new_with_bson(d) for d in ds]\n return l\n\n @classmethod\n def find_join_forum(cls, **kwargs):\n name = cls.__name__\n kwargs['deleted'] = False\n if 'id' in kwargs:\n kwargs['_id'] = ObjectId(kwargs['id'])\n kwargs.pop('id')\n ds = cls.db[name].find(kwargs).sort([('created_time', -1)]).limit(10)\n l = [cls._new_with_bson(d) for d in ds]\n return l\n","sub_path":"models/reply.py","file_name":"reply.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"291563509","text":"from datetime import datetime, timedelta\nimport re\nfrom django.conf import settings\nfrom redis import Redis\nimport django_rq\nfrom fuzzywuzzy import fuzz\nimport django_rq\nfrom contact.models import Contact, ContactErrorMultiple, ContactErrorDuplicate\n\n\ndef check_contact(pk):\n # Debounce the task : wait RQ_DEBOUNCE_DELAY_IN_S seconds before doing it\n # really. Reinit the count down every time it is triggered.\n r = django_rq.get_connection()\n redis_key = 'check_contact_{}'.format(pk)\n job_id = r.get(redis_key)\n if job_id:\n # A job is already running : delete\n queue = django_rq.get_queue()\n job = queue.fetch_job(job_id.decode())\n if job:\n job.delete()\n # enqueue with debounce delay\n scheduler = django_rq.get_scheduler()\n debounce_delay = timedelta(seconds=settings.RQ_DEBOUNCE_DELAY_IN_S)\n j = scheduler.enqueue_in(debounce_delay, 'contact.tasks._check_contact', pk)\n r.set(redis_key, j.id)\n\n\ndef _check_contact(pk):\n django_rq.enqueue('contact.tasks._check_duplicate', pk)\n django_rq.enqueue('contact.tasks._check_multiple', pk)\n\n\ndef _check_duplicate(pk):\n contact = Contact.objects.get(pk=pk)\n family_name = None\n given_name = None\n primary_email = None\n if contact.family_name:\n family_name = contact.family_name\n if contact.given_name:\n given_name = contact.given_name\n if contact.primary_email:\n primary_email = contact.primary_email.email\n\n for c in Contact.objects.exclude(pk=pk):\n family_name_ratio = 0\n given_name_ratio = 0\n email_ratio = 0\n if family_name and c.family_name:\n family_name_ratio = fuzz.ratio(family_name, c.family_name)\n if given_name and c.given_name:\n given_name_ratio = fuzz.ratio(given_name, c.given_name)\n if primary_email and c.primary_email:\n email_ratio = fuzz.ratio(primary_email, c.primary_email.email)\n if (family_name_ratio + given_name_ratio > settings.CONTACT_ERROR_DUPLICATE_NAME_THRES or\n email_ratio > settings.CONTACT_ERROR_DUPLICATE_EMAIL_THRES):\n ContactErrorDuplicate.objects.get_or_create(\n kind='duplicate', old=c, new=contact,\n family_name_ratio=family_name_ratio,\n given_name_ratio=given_name_ratio,\n email_ratio=email_ratio)\n else:\n ContactErrorDuplicate.objects.filter(old=c, new=contact).delete()\n\n\nMULTIPLE_CONTACT_PATTERN = re.compile('.*\\set\\s.*', re.IGNORECASE)\n\ndef _check_multiple(pk):\n contact = Contact.objects.get(pk=pk)\n if re.match(MULTIPLE_CONTACT_PATTERN, contact.given_name):\n ContactErrorMultiple.objects.get_or_create(\n kind='multiple', contact=contact, field_name='given_name')\n","sub_path":"contact/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":2784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
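The `check_contact` task above debounces via Redis: it stores the pending RQ job id, cancels that job on every new trigger, and re-schedules `_check_contact` to run `RQ_DEBOUNCE_DELAY_IN_S` seconds after the last call. The same mechanism in-process, stripped of Redis and RQ, as a minimal illustration of the pattern:

```python
import threading

class Debouncer:
    """Call fn once, `delay` seconds after the most recent trigger."""

    def __init__(self, fn, delay):
        self.fn, self.delay = fn, delay
        self._timer = None
        self._lock = threading.Lock()

    def __call__(self, *args, **kwargs):
        with self._lock:
            if self._timer is not None:
                self._timer.cancel()              # drop the pending run
            self._timer = threading.Timer(self.delay, self.fn, args, kwargs)
            self._timer.start()                   # re-arm the countdown

# debounced = Debouncer(_check_contact, delay=5); calling debounced(pk)
# repeatedly runs _check_contact only once, 5 s after the last call.
```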
+{"seq_id":"13196106","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Mar 18 20:48:44 2020\r\n\r\n@author: Luís\r\n\"\"\"\r\nimport numpy as np\r\n\r\nprint(\"Seja f(x) = ax² + bx + c com a, b e c reais.\")\r\na = float(input(\"Digite o valor de a: \"))\r\nb = float(input(\"Digite o valor de b: \"))\r\nc = float(input(\"Digite o valor de c: \"))\r\n\r\ndef ordemconvergencia(x0,x1,x2,x3):\r\n e3=abs((x3-x2))\r\n e2=abs((x2-x1))\r\n e1=abs((x1-x0))\r\n alpha1 = np.log(e3/e2) #ordemdeconvergencia\r\n alpha2 = np.log(e2/e1)\r\n ordemconvergencia = alpha1/alpha2\r\n lambida = (e3/(e1**ordemconvergencia)) #constade de erro\r\n print(\"Ordem de convergência é: \", ordemconvergencia)\r\n print(\"Constante de erro assintótica é: \", lambida)\r\n return()\r\n\r\ndef newton(f,df,x0,e,maxiter=50):\r\n resultados=[]\r\n \r\n if abs(f(x0)) <= e:\r\n return x0, resultados\r\n print(\"k\\t x0\\t\\t f(x0)\")\r\n k=1\r\n while k<=maxiter:\r\n x1=x0-f(x0)/df(x0)\r\n resultados.append(x1)\r\n print(\"%d\\t%e\\t%e\"%(k,x1,f(x1)))\r\n if abs(f(x1))<=e:\r\n return x1,resultados\r\n x0=x1\r\n k=k+1\r\n print(\"ERRO: Número máximo de iterações atingido\")\r\n return x1,resultados\r\nif __name__ ==\"__main__\":\r\n def f(x):\r\n return a*x**2 + b*x + c\r\n def df(x):\r\n return 2*a*x + b\r\n \r\nraiz, resultados = newton(f,df,1.5,0.0000000001)\r\nordemconvergencia(resultados[-4],resultados[-3],resultados[-2],resultados[-1])\r\n\r\nprint(\"A raiz é: \", raiz)\r\n","sub_path":"metodonewton_convergencia.py","file_name":"metodonewton_convergencia.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
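The `ordemconvergencia` function above estimates the order of convergence from three successive error estimates e_k = |x_k - x_{k-1}|; in formula form it computes

```latex
p \approx \frac{\ln(e_3/e_2)}{\ln(e_2/e_1)}, \qquad
\lambda \approx \frac{e_3}{e_1^{\,p}}.
```

Note that the asymptotic error constant is usually defined as the limit of e_{n+1}/e_n^p, so dividing by e_2^p rather than e_1^p would match the textbook definition; the exponent estimate p itself is standard.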
+{"seq_id":"397384950","text":"#-*- coding: utf-8 -*-\n\nimport pyvisa as visa # interface with NI-Visa\nimport time # time handling\n################################\ndef read_only_properties(*attrs):\n \"\"\"\n decorator to make some class variables read-only\n made by oz123 from \n https://github.com/oz123/oz123.github.com/blob/master/media/uploads/readonly_properties.py\n \"\"\"\n def class_rebuilder(cls):\n \"The class decorator example\"\n\n class NewClass(cls):\n \"This is the overwritten class\"\n def __setattr__(self, name, value):\n\n if name not in attrs:\n pass\n elif name not in self.__dict__:\n pass\n else:\n raise AttributeError(\"Can't touch {}\".format(name))\n\n super().__setattr__(name, value)\n\n return NewClass\n\n return class_rebuilder\n\n##########################\n@read_only_properties('id_bk_hex', 'id_bk_dec', 'instr', 'ch1', 'ch2', )\nclass BK4052:\n def __init__(self):\n \"\"\"\n BK4052\n =========\n \n This is a virtual object that represents the arbitrary function \n generator BK4052 and mimics it's behaviour. The user should \n interact with this object in the same fashion he or she interacts \n with the function generator. As it is the case for the real function \n generator, we have independent access to the both channels, which \n are represented by \"ch1\" and \"ch2\".\n \n The instrument has a \"identify\" function that returns the instrument\n information and 3 pyvisa wrapped functions: \"read\", \"write\", \"query\" \n and \"close\" (go to pyvisa documentation for more detail). There is also\n a function \"find_interface\" that automatic find in which USB port\n the function generator is connected.\n \n Usage:\n \n >>> import pylef # import the pylef package\n >>> instrument = pylef.BK4052() # define the instrument\n >>> instrument.idenfify() # idenfity the instrument\n\n The channels are independently defined and accessed. For each one of \n them we can set up the function properties, such as 'frequency' and \n 'peak-to-peak voltage' and many channel attibutes, such as 'inversion',\n 'load impedance' and 'TTL sync output'. We can also, turn the channels\n ON and OFF.\n \n >>> channel1 = instrument.ch1() # define channel 1\n >>> channel1.turn_on() # turn channel 1 ON\n >>> channel1.sync_on() # turn the TTL sync output for channel 1 \n\n The most important is the 'function type', which can be one of those:\n 'SINE', 'SQUARE', 'RAMP', 'PULSE', 'NOISE', 'ARB', 'DC' and each one of \n them are defined by a particular set of properties. Those properties are\n one of: 'frequency', 'Vpp', 'offset', 'phase', 'symmetry', 'duty', 'mean',\n 'stdev', 'delay'. Some of those properties are share by more the one \n function type and some are privative to only one type. For example, 'SINE', \n 'SQUARE', 'RAMP', 'ARB' and 'PULSE' have the 'frequency' and 'Vpp' properties\n while 'noise' type is the only one who has the 'mean' and 'stded' properties. 
\n\n Usage:\n \n >>> channel1.set_function('ramp') # create a triangular wave\n >>> channel1.set_frequency(100) # set the frequency to 100 Hz \n >>> channel1.set_Vpp(2) # set the peak-to-peak voltage to 2 V\n >>> channel1.set_frequency()\n\n >>> channel2.turn_on() # turn channel 2 ON\n >>> channel2.set_function('noise') # create a noise\n >>> channel2.set_mean(0) # set the average 0 V\n >>> channel2.set_stdev(0.5) # set the standard deviation to 0.5 V \n \n The function \"wave_info\" returns a python dictionay with the particular wave\n information\n\n Usage:\n\n >>> info1 = channel1.wave_info() # current wave information of channel 1\n >>> print(info1['frequency']) # will return 100\n >>> print(info1['type']) # will return ramp\n >>> info2 = channel2.wave_info() # current wave information of channel 2\n >>> print(info2['stdev']) # will return 0.5 \n \"\"\"\n\n self.id_bk_hex = '0xF4ED'; # identificador do fabricante BK em hexadecimal\n self.id_bk_dec = '62701'; # identificador do fabricante BK em hexadecimal\n self.delay_time = 0.5 # time to wait after write and query - BK BUG!\n interface_name = self.find_interface()\n # instrument initialization\n self.instr = visa.ResourceManager().open_resource(interface_name) ## resource name\n self.instr.timeout = 10000 # set timeout to 10 seconds\n #self.instr.delay = 1.0 #delay for query\n self.ch1 = ChannelFuncGen(self.instr, 'CH1', self.write, self.query)\n self.ch2 = ChannelFuncGen(self.instr, 'CH2', self.write, self.query)\n self.instr.chunk_size = 40960 # set the buffer size to 40 kB \n\n def find_interface(self):\n \"\"\" Function to extract the interface name for the BK function generator\"\"\"\n resources = visa.ResourceManager().list_resources()\n instr_n = len(resources)\n if instr_n == 0:\n raise ValueError('Nenhum instrumento foi identificado: \\n Verique se estao' \\\n 'ligados e se o cabo USB foi conectado. Se o problema persistir \\n'\\\n 'desconecte os cabos USB, aguarde 20 segundos e conecte novamente.')\n bk_str = ''\n for resource in resources:\n fab_id = resource.split('::')[1]\n if fab_id == self.id_bk_hex or fab_id == self.id_bk_dec:\n instr = visa.ResourceManager().open_resource(resource)\n instr.timeout = 10000 # set timeout to 10 seconds\n bk_str = instr.query('*IDN?', delay = self.delay_time)\n #instr.write('*IDN?');time.sleep(1.0)\n #bk_str = instr.read()\n #time.sleep(1)\n resource_out = resource\n print(\"Gerador de Funções conectado! Id = \" + bk_str[:-1])\n if bk_str == '':\n raise ValueError('O osciloscopio BK scope nao foi identificado:\\n'\\\n 'Verique se o equipamento está ligado e se o cabo USB \\n'\\\n 'foi conectado. 
Se o problema persistir, \\n'\\\n 'desconecte o cabo USB, aguarde 20 segundos \\n'\\\n 'e conecte novamente.')\n return resource_out\n \n####### Communications wraps ########\n def identify(self):\n \"\"\" identify the resource\"\"\"\n return self.instr.query('*IDN?')\n#\n def wait(self):\n \"\"\" wait for the task to end \"\"\"\n return self.instr.query('*OPC?', delay = self.delay_time)\n#\n def write(self, msg):\n \"\"\" write into the laser \"\"\"\n write_output = self.instr.write(str(msg)) \n self.wait()\n return write_output \n \n def query(self, msg):\n \"\"\" query into the laser \"\"\"\n return self.instr.query(str(msg), delay = self.delay_time)\n \n def read(self):\n \"\"\" read from the laser \"\"\"\n return self.instr.read() \n# \n def close(self):\n \"\"\" close the instrument \"\"\"\n return self.instr.close()\n\n#######\n@read_only_properties('instrument', 'channel', 'functions', 'other_chan', 'dict_info', 'tag_volts', 'frequency_max', 'frequency_min', 'Vpp_max', 'Vpp_min', 'offset_max', 'offset_min', 'phase_max', 'phase_min', 'symmetry_max', 'symmetry_min', 'duty_max', 'duty_min', 'stdev_max', 'stdev_min', 'mean_max', 'mean_min', 'delay_max', 'delay_min')\nclass ChannelFuncGen:\n def __init__(self, instrument, channel, write, query):\n \"\"\"\n Class for the channels of the function generator\n \"\"\"\n self.query = query\n self.write = write\n self.instr = instrument ## resource name\n self.channel = channel\n self.functions = ['SINE', 'SQUARE', 'RAMP', 'PULSE', 'NOISE', 'ARB', 'DC'] # list of allowed functions\n self.other_chan = {'CH1':'2', 'CH2':'1'}\n self.dict_info = {'WVTP':'type', 'FRQ':'frequency', 'AMP':'Vpp', 'OFST':'offset', 'PHSE':'phase', \n 'DUTY':'duty_cycle', 'SYM':'symmetry', 'DLY':'delay', 'STDEV':'stdev', 'MEAN':'mean', 'PERI':'period', \n\t\t\t 'LLEV':'low_level', 'HLEV':'high_level'}\n self.tag_volts_secs = ['Vpp', 'mean', 'stdev', 'offset', 'low_level', 'high_level', 'period']\n # instrument limits\n self.frequency_max = 5.0e6 # maximum freqeuncy in Hertz\n self.frequency_min = 1.0e-6 # minimum freqeuncy in Hertz\n self.Vpp_max = 20 # maximum peak-to-peak Voltage in V\n self.Vpp_min = 0.0004 # minimum peak-to-peak Voltage in V\n self.offset_max = 10 # maximum offset in V \n self.offset_min = -10 # minimum offset in V\n self.phase_max = 360 # maximum phase in degrees\n self.phase_min = 0 # minimum phase in degrees\n self.symmetry_max = 100 # maximum symmetry in percentage\n self.symmetry_min = 0 # minimum symmetry in percentage\n self.duty_max = 99.9 # maximum duty cycle in percentage\n self.duty_min = 0.1 # minimum duty cycle in percentage\n self.stdev_max = 2.222 # maximum standard deviation in volts\n self.stdev_min = 0.4e-3 # minimum standard deviation in volts\n self.mean_max = 2.222 # maximum mean in volts\n self.mean_min = 0.0 # minimum mean in Voltse\n self.delay_max = 1000 # maximum delay in seconds\n self.delay_min = 0 # minimum duty delay in seconds\n#\n def state(self):\n \"\"\" return the specified channel state \"\"\"\n #return self.instr.query('C' + self.channel[-1] + ':OUTput?').split(' ')[1].split(',')[0]\n return self.query('C' + self.channel[-1] + ':OUTput?').split(' ')[1].split(',')[0]\n# \n def turn_on(self):\n \"\"\" turn the specified channel ON \"\"\"\n self.write('C' + self.channel[-1] + ':OUTput ON')\n return None\n#\n def turn_off(self):\n \"\"\" turn the specified channel OFF \"\"\"\n self.write('C' + self.channel[-1] + ':OUTput OFF')\n return None\n####\n def sync(self):\n \"\"\" return the specified channel sync response 
\"\"\"\n return self.query('C' + self.channel[-1] + ':SYNC?')\n#\n def sync_on(self):\n \"\"\" turn the specified channel sync ON \"\"\"\n self.write('C' + self.channel[-1] + ':SYNC ON')\n return None\n# \n def sync_off(self):\n \"\"\" turn the specified channel sync OFF \"\"\"\n self.write('C' + self.channel[-1] + ':SYNC OFF')\n return None\n#####\n def load(self):\n \"\"\" return the specified channel load \"\"\"\n return self.query('C' + self.channel[-1] + ':OUTput?')[:-1].split(',')[-1]\n# \n def set_load_hz(self):\n \"\"\" set the channel load to HZ \"\"\"\n return self.write('C' + self.channel[-1] + ':OUTput LOAD,HZ')\n#\n def set_load_50(self):\n \"\"\" set the channel load to 50 Ohms \"\"\"\n return self.write('C' + self.channel[-1] + ':OUTput LOAD,50')\n####\n def invert_on(self):\n \"\"\" turn the specified channel inversion ON\"\"\"\n self.write('C' + self.channel[-1] + ':INVerT ON')\n return None\n# \n def invert_off(self):\n \"\"\" turn the specified channel inversion OFF \"\"\"\n self.write('C' + self.channel[-1] + ':INVerT OFF')\n return None \n#### \n def set_function(self, val):\n \"\"\"set the function at the channel \"\"\"\n val = val.upper() # convert to upper case\n if val in self.functions:\n cmd = 'C' + self.channel[-1] + ':BSWV WVTP,' + val\n self.write(cmd)\n else:\n raise ValueError('The functions must be one of those: ' + ', '.join([l.lower() for l in self.functions]))\n return None\n#\n def set_frequency(self, val):\n \"\"\"set the function generator frequency \"\"\"\n if val <= self.frequency_max and val >= self.frequency_min:\n cmd = 'C' + self.channel[-1] + ':BSWV FRQ,' + str(float(val)) + 'Hz'\n self.write(cmd)\n else: \n raise ValueError(\"The frequency must be between %4.2f uHz and %4.2f MHz\" % (1e6*self.frequency_min, 1e-6*self.frequency_max)) \n return None \n\n def set_Vpp(self, val):\n \"\"\"set the function generator voltage peak-to-peak \"\"\"\n if val <= self.Vpp_max and val >= self.Vpp_min:\n cmd = 'C' + self.channel[-1] + ':BSWV AMP,' + str(float(val)) + 'V'\n self.write(cmd)\n else: \n raise ValueError(\"The Vpp must be between %4.2f V and %4.2f V\" % (self.Vpp_min, self.Vpp_max)) \n return None\n \n def set_offset(self, val):\n \"\"\"set the function generator offset \"\"\"\n if val <= self.offset_max and val >= self.offset_min:\n cmd = 'C' + self.channel[-1] + ':BSWV OFST,' + str(float(val)) + 'V'\n self.write(cmd)\n else: \n raise ValueError(\"The offset must be between %4.2f V and %4.2f V\" % (self.offset_min, self.offset_max)) \n return None \n \n def set_phase(self, val):\n \"\"\"set the function generator phase \"\"\"\n if val <= self.phase_max and val >= self.phase_min:\n cmd = 'C' + self.channel[-1] + ':BSWV PHSE,' + str(float(val))\n self.write(cmd)\n else: \n raise ValueError(\"The phase must be between %4.2f and %4.2f degrees\" % (self.phase_min, self.phase_max)) \n return None\n\n def set_symmetry(self, val):\n \"\"\"set the function generator signal symmetry \"\"\"\n if val <= self.symmetry_max and val >= self.symmetry_min:\n cmd = 'C' + self.channel[-1] + ':BSWV SYM,' + str(float(val))\n self.write(cmd)\n else: \n raise ValueError(\"The symmetry must be between %4.0f and %4.0f percent\" % (self.symmetry_min, self.symmetry_max)) \n return None \n \n def set_duty(self, val):\n \"\"\"set the function generator duty cycle \"\"\"\n if val <= self.duty_max and val >= self.duty_min:\n cmd = 'C' + self.channel[-1] + ':BSWV DUTY,' + str(float(val))\n self.write(cmd)\n else: \n raise ValueError(\"The duty cycle must be between %4.0f and 
%4.0f percent\" % (self.duty_min, self.duty_max)) \n return None\n#\n def set_mean(self, val):\n \"\"\"set the function generator mean in Volts\"\"\"\n if val <= self.mean_max and val >= self.mean_min:\n cmd = 'C' + self.channel[-1] + ':BSWV MEAN,' + str(float(val)) + 'V'\n self.write(cmd)\n else: \n raise ValueError(\"The noise mean must be between %4.2f V and %4.2f V\" % (self.mean_min, self.mean_max)) \n return None\n#\n def set_stdev(self, val):\n \"\"\"set the noise function generator standard deviation in Volts\"\"\"\n if val <= self.stdev_max and val >= self.stdev_min:\n cmd = 'C' + self.channel[-1] + ':BSWV STDEV,' + str(float(val)) + 'V'\n self.write(cmd)\n else: \n raise ValueError(\"The standard deviation must be between %4.0f V and %4.0f V\" % (self.stdev_min, self.stdev_max)) \n return None\n# \n def set_delay(self, val):\n \"\"\"set the function generator pulse delay in seconds \"\"\"\n if val <= self.delay_max and val >= self.delay_min:\n cmd = 'C' + self.channel[-1] + ':BSWV DLY,' + str(float(val)) + 'S'\n self.write(cmd)\n else: \n raise ValueError(\"The delay must be between %4.0f s and %4.0f s\" % (self.delay_min, self.delay_max)) \n return None\n#\n def wave_info(self, raw_output = False):\n \"\"\"return the wave information for \"channel\". If raw_output = True, the output from the function is returned without processing\"\"\"\n output = self.query('C' + self.channel[-1] + ':BSWV?')\n if not raw_output:\n info = output.split(' ')[-1][:-1].split(',') \n info_tags, info_vals = info[0:][::2], info[1:][::2]\n N = len(info_tags)\n output = {}\n for n in list(range(N)):\n tag = self.dict_info[info_tags[n]]\n if tag in self.tag_volts_secs:\n val = float(info_vals[n][:-1])\n elif tag == 'frequency':\n val = float(info_vals[n][:-2])\n elif tag == 'type': val = info_vals[n].lower()\n else: val = float(info_vals[n])\n output[tag] = val\n return output\n# \n def copy_to(self):\n \"\"\"\n copy the parameters to this channel from the other channel \n \"\"\"\n self.write('PAraCoPy C' + self.other_chan[self.channel] + ',C' + self.channel[-1])\n return None\n# \n def copy_from(self):\n \"\"\"\n copy the parameters from this channel to the other channel \n \"\"\"\n self.write('PAraCoPy C' + self.channel[-1] + ',C' + self.other_chan[self.channel])\n return None\n\n\n","sub_path":"build/lib/pylef/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":17295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
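The `read_only_properties` decorator in the record lets a protected attribute be assigned once (while it is still absent from the instance `__dict__`) and raises on any reassignment. A tiny self-contained demonstration, with a hypothetical `Device` class:

```python
@read_only_properties('serial')
class Device:
    def __init__(self):
        self.serial = "ABC123"    # first assignment succeeds
        self.mode = "idle"        # unprotected, always writable

d = Device()
d.mode = "run"                    # fine
try:
    d.serial = "XYZ"              # second write to a protected name
except AttributeError as err:
    print(err)                    # -> Can't touch serial
```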
+{"seq_id":"427618202","text":"from sys import stdin\n\nclass Node :\n def __init__(self , data):\n self.data = data\n self.next = None\n\nclass LinkedList :\n @staticmethod\n def make_LL(List) :\n head , tail = None , None\n for ele in List :\n newnode = Node(ele)\n if(head == None) :\n head = newnode\n tail = newnode\n else :\n tail.next = newnode\n tail = newnode\n return head\n @staticmethod\n def Length_LL(head) : \n if(head == None) :\n return 0\n temp = head\n length = 0\n while(temp != None) :\n length += 1\n temp = temp.next\n return length\n @staticmethod\n def Length_LL_rec(head) :\n if(head == None) :\n return 0\n return 1 + LinkedList.Length_LL_rec(head.next)\n \n\nList = [int(element) for element in input().rstrip().split(\" \")]\n\nll = LinkedList.make_LL(List)\nans = LinkedList.Length_LL(ll)\nprint(ans)\nans1 = LinkedList.Length_LL_rec(ll)\nprint(ans1)\n ","sub_path":"DATASTRUCTURESANDALGORITHMS/Python/LinkedList/LengthofLinkedList.py","file_name":"LengthofLinkedList.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
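A caveat on the record above: `Length_LL_rec` uses one stack frame per node, so on CPython it raises `RecursionError` once the list is longer than the recursion limit (1000 by default); the iterative `Length_LL` is safe at any length:

```python
import sys

print(sys.getrecursionlimit())                 # typically 1000 on CPython

long_list = LinkedList.make_LL(range(5000))
print(LinkedList.Length_LL(long_list))         # 5000, constant stack depth
# LinkedList.Length_LL_rec(long_list)          # would raise RecursionError
```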
+{"seq_id":"150649793","text":"from imports import *\n\n\nif __name__ == \"__main__\":\n\n mlflow.set_experiment(experiment_name=\"MLflow demo\")\n\n print(\"Loading the data...\")\n data = pd.read_csv(\"cleaned_data.csv\")\n\n X = data.drop(['Response', 'Unnamed: 0', 'ID'], axis=1)\n y = data['Response']\n\n print(\"Splitting the data...\")\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=42)\n\n ### Buildig the model\n\n classifier = RandomForestClassifier(n_estimators=600, max_depth=6, criterion='gini')\n classifier.fit(X_train, y_train)\n\n y_pred = classifier.predict(X_test)\n y_proba = classifier.predict_proba(X_test)[:,1]\n\n cm = confusion_matrix(y_test, y_pred)\n\n model_accuracy = accuracy_score(y_test, y_pred)\n\n print(\"Training completed...\")\n print(\"Accuracy : \", model_accuracy)\n print(\"Confusion matrix : \", cm)\n\n ## Tracking the model accuracy\n mlflow.log_metric(\"accuracy\", model_accuracy)\n mlflow.sklearn.log_model(classifier, \"model\")\n \n\n\n","sub_path":"ml_flow_pipeline.py","file_name":"ml_flow_pipeline.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
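The run above logs only the accuracy metric and the model artifact. Logging the hyperparameters as well makes runs comparable side by side in the MLflow UI; a short addition using the same values as the script:

```python
# Record the hyperparameters used to build the classifier above.
mlflow.log_param("n_estimators", 600)
mlflow.log_param("max_depth", 6)
mlflow.log_param("criterion", "gini")
```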
+{"seq_id":"280884593","text":"import requests\nimport json\nBASE_URL = 'https://api.coinbase.com/v2'\nHEADER = {\"Authorization\" : \"Bearer 544bb8971043d95705eedf2c6985884d3f53fa518c1c4440517497c8a0d58c79\"}\n\ndef get_positions():\n # make a request to coinbase for account info\n account_url = BASE_URL+ '/accounts'\n data = requests.get(account_url, headers=HEADER)\n if(data.json().get('data')):\n data_json = data.json()['data']\n position_dict = {}\n for coin in data_json:\n position_dict[coin['balance']['currency']] = float(coin['balance']['amount'])\n return position_dict\n return {'message': 'Key Expired'}\n\ndef convert_to_usd(coin, quantity):\n convert_url = BASE_URL +'/prices/{}-USD/spot'.format(coin.upper())\n data = requests.get(convert_url, headers=HEADER)\n if(data.json().get('data')):\n data_json = data.json()['data']\n return( float(data_json['amount']) * quantity)\n return {'message': 'Key Expired'}\n\ndef get_holdings(wallet):\n holdings = {}\n for coin in wallet:\n if (wallet[coin] > 0.00):\n holdings[coin] = convert_to_usd(coin, wallet[coin])\n return holdings\n\ndef calculate_total_usd(holdings):\n positions = list(holdings.values())\n current_sum = 0\n for position in positions:\n current_sum += position\n return current_sum\n\nif __name__ == '__main__':\n positions = get_positions()\n holdings = get_holdings(positions)\n print(positions)\n print(holdings)\n print(calculate_total_usd(holdings))","sub_path":"python_scripts/get_portfolio_value.py","file_name":"get_portfolio_value.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
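The record above embeds a live bearer token in source code, which leaks the credential to anyone with repository access. A sketch that reads it from the environment instead (the variable name `COINBASE_TOKEN` is my invention):

```python
import os

token = os.environ.get("COINBASE_TOKEN")       # export COINBASE_TOKEN=... first
if token is None:
    raise SystemExit("Set COINBASE_TOKEN in the environment")
HEADER = {"Authorization": "Bearer " + token}
```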
+{"seq_id":"400479919","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport re\n\nclass Platinum_scraper(scrapy.Spider):\n    name = 'Platinum_scraper'\n    start_urls = [\n        'http://www.platinumfashionmall.com/directory/s=',\n    ]\n\n    def parse(self, response):\n        for phone_list in response.xpath(\"//div[@class = 'tel light-grey size13 upcase']/text()\").extract():\n            phone_list = phone_list.replace(\"-\", \"\")\n            match = re.search(r'\d{10}', phone_list)\n            if match:  # skip entries without a 10-digit phone number\n                yield {\n                    'phone_number': match.group()\n                }\n\n        # Follow pagination only while the page reports an active page number;\n        # the old check `if next_page_url is not None` was always true.\n        active_page = response.xpath(\"//a[@class = 'num active']/text()\").extract_first()\n        if active_page is not None and active_page.isdigit():\n            next_page = int(active_page) + 1\n            next_page_url = \"http://www.platinumfashionmall.com/directory/p-\" + str(next_page) + \"/\"\n            yield scrapy.Request(response.urljoin(next_page_url))\n","sub_path":"MAPS_bot/MAPS_bot/spiders/platinum_scraper.py","file_name":"platinum_scraper.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"442057434","text":"import json\nimport os\nimport pytest\nfrom webserver import app\n\n\ndef get_credentials():\n with open(\"tests/cred.json\") as f:\n fj = json.load(f)\n return fj\n\n\ndef create(s):\n url = os.path.join(\"http://127.0.0.1:8080/api/\", \"user/register\")\n data = get_credentials()\n data = json.dumps(data)\n resp = s.post(url, data=data, headers={\"content-type\": \"application/json\"})\n print(\"create: \" + str(resp))\n\n\ndef login(s):\n url = os.path.join(\"http://127.0.0.1:8080/api/\", \"user/login\")\n data = get_credentials()\n data = json.dumps(data)\n resp = s.post(url, data=data, headers={\"content-type\": \"application/json\"})\n with open(\"response_code_here\", \"w\") as f:\n f.write(str(resp))\n\n\n@pytest.fixture\ndef client():\n test_client = app.test_client()\n create(test_client)\n login(test_client)\n return test_client\n\n\n@pytest.fixture\ndef dataset_url():\n return \"/api/dataset/\"\n\n\n@pytest.fixture\ndef image_url():\n return \"/api/image/\"\n\n\n@pytest.fixture\ndef category_url():\n return \"/api/category/\"\n","sub_path":"backend/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"499516814","text":"# -*- coding: utf-8 -*-\n\nimport host\nimport time\nfrom executor import Executor\n\ntry:\n from helpers import get_logger\nexcept ImportError:\n from logging import getLogger as get_logger\nlogger = get_logger()\n\n\nclass OutputReactions(Executor):\n \"\"\"\n Args:\n gpios (List[:class: 'TrObject']): Список объектов TrObject укзанных выходов\n delay (int): значение в секундах, 5 сек\n type (str): одно из возможных событий:\n '1.замкнуть,2.разомкнуть,3.замкнуть-разомкнуть,4.разомкнуть-замкнуть,5.замкнуть-замкнуть,6.разомкнуть-разомкнуть'\n \"\"\"\n\n def __init__(self, gpios, delay, reaction_type, *args, **kwargs):\n self.gpios = gpios\n self.delay = delay\n self.logic = {\n 1: (self._set_output_high, self.do_nothing),\n 2: (self._set_output_low, self.do_nothing),\n 3: (self._set_output_high, self._set_output_low),\n 4: (self._set_output_low, self._set_output_high),\n 5: (self.high_high, self.high_high),\n 6: (self.low_low, self.low_low),\n }\n self.first_method = None\n self.last_method = None\n self.open_flag = False\n self.ts_to_come_back = 0\n self.output_logic(reaction_type)\n\n logger.debug(\n \"gpios: %s, delay: %s, reaction_type: %s\",\n self.gpios,\n self.delay,\n reaction_type,\n )\n\n def output_logic(self, oper_type):\n if not isinstance(oper_type, str):\n logger.error(\"Type is not str. Can't initialize logic.\")\n raise ValueError(\"{} is not string!\".format(oper_type))\n reaction_code = oper_type.split(\".\")[0]\n if not reaction_code.isdigit():\n logger.error(\"Reaction code must be digit. Can't initialize logic.\")\n raise ValueError(\n \"Reaction code must be digit. {} is not digit!\".format(reaction_code)\n )\n reaction_code = int(reaction_code)\n _methods = self.logic.get(reaction_code)\n if _methods is None:\n logger.error(\"Can't get associated methods. Can't initialize logic.\")\n raise ValueError(\"Can't get associated methods. Can't initialize logic.\")\n\n self.first_method = _methods[0]\n self.last_method = _methods[1]\n\n logger.debug(\n \"Output logic initialized successful. Methods are: %s, %s\",\n self.first_method.__name__,\n self.last_method.__name__,\n )\n\n def do_nothing(self):\n pass\n\n def _set_output_low(self):\n logger.debug(\"Start setting low\")\n for gpio in self.gpios:\n gpio.set_output_low()\n\n def _set_output_high(self):\n logger.debug(\"Start setting high\")\n for gpio in self.gpios:\n gpio.set_output_high()\n\n def low_low(self):\n # low - high - low\n logger.debug(\"start setting low_high_low operation\")\n self._set_output_low()\n host.timeout(500, self._set_output_high)\n\n def high_high(self):\n # high - low - high\n logger.debug(\"start setting high-low-high operations\")\n self._set_output_high()\n host.timeout(500, self._set_output_low)\n\n def _timer(self):\n if self.ts_to_come_back < time.time():\n logger.debug(\n \"Timer is stopping. 
%s < %s\", self.ts_to_come_back, time.time()\n )\n self.open_flag = False\n self.last_method()\n self.ts_to_come_back = 0\n else:\n host.timeout(1000, self._timer)\n\n def output_operation(self):\n \"\"\"\n - Если шлагбаум закрыт (open_flag == False), то выполняет первый метод,\n а второй метод запустится когда ts_to_come_back == time.time()\n - Если шлагбаум поднят на данный момент, то first_method -не запускается,\n а ts_to_come_back увеличивается на delay\n - Таймер _timer следит, когда нужно запустить last_method\n\n \"\"\"\n if self.open_flag:\n self.ts_to_come_back += self.delay\n logger.debug(\n \"Increase timer, last operation will be after: %s seconds.\",\n self.ts_to_come_back - int(time.time()),\n )\n return\n else:\n self.ts_to_come_back = int(time.time()) + self.delay\n logger.debug(\"Setup timer, last operation will be after: %s\", self.delay)\n self.open_flag = True\n self.first_method()\n self._timer()\n\n def manual_activation_of_outputs(self):\n logger.debug(\"Manual activation of outputs starting.\")\n host.stats()[\"run_count\"] += 1\n self.output_operation()\n\n def execute(self, *args, **kwargs):\n self.output_operation()\n","sub_path":"scripts/universal/alarm_monitor/resources/reactions/executors/output_reaction.py","file_name":"output_reaction.py","file_ext":"py","file_size_in_byte":5026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"237872098","text":"# -*- coding: UTF-8 -*-\nfrom threading import Lock\nfrom time import time\n\n\nclass FormatRange:\n \"\"\" 请求头范围请求格式。\n\n :param\n format_dict : 范围格式化字典\n\n 范围关键字:\n begin : 请求字节开始\n end : 请求字节结束(不包括当前字节)\n end_with: 请求戒子结束(包括当前字节)\n length : 请求字节长度\n\n 对于HTTP/S:\n format_dict = {'Range': 'bytes={begin}-{end_with}'}\n 将生成请求头域:(若begin=0, end_with=999, length=1000)\n Range: bytes=0-999\n\n \"\"\"\n def __init__(self, format_dict):\n self._full_formats = format_dict\n\n self._query_dict = {}\n self._header_dict = {}\n\n for i, j in self._full_formats.items():\n if i[0] != '&':\n self._header_dict[i] = j\n else:\n self._query_dict[i] = j\n\n @staticmethod\n def _format(r, format_dict):\n ret_dict = format_dict.copy()\n for k, v in format_dict.items():\n begin = r[0]\n end = r[1] or ''\n end_with = r[1] - 1 if r[1] is not None and r[1] > 0 else ''\n length = (r[1] or 0) - r[0]\n ret_dict[k] = v.format(begin=begin, end=end, end_with=end_with, length=length)\n return ret_dict\n\n def get_headers(self, r):\n return self._format(r, self._header_dict)\n\n def get_query(self, r):\n return self._format(r, self._query_dict)\n\n def __iter__(self):\n return iter(self._full_formats.items())\n\n\nclass Timer:\n \"\"\" 简单的计时器。 \"\"\"\n __slots__ = '_start', '_inc', '_end'\n\n def __init__(self, inc_time=0):\n self._start = None\n self._inc = inc_time\n self._end = None\n\n def start(self):\n if not self._start:\n self._end = None\n self._start = time()\n\n def stop(self):\n if self._start:\n self._end = time()\n self._inc += self._end - self._start\n self._start = None\n\n def get_time(self):\n return time() - self._start + self._inc if self._start else self._inc\n\n def clear(self):\n self._inc = 0\n\n\nclass RealtimeSpeed:\n \"\"\" 使用滑动平均算法得到的实时速度。 \"\"\"\n\n def __init__(self, depth=8):\n \"\"\"\n :param\n depth : 滑动平均的深度\n \"\"\"\n self._speed = 0\n self._prv_size = 0\n self._prv_time = None\n self._lock = Lock()\n self._depth = depth\n self._moving_list = [0 for _ in range(depth)]\n\n def is_stopped(self):\n return self._prv_time is None\n\n def start(self, start):\n with self._lock:\n self._prv_size = start\n self._prv_time = time()\n self._speed = 0\n\n def stop(self):\n with self._lock:\n self._prv_time = None\n self._speed = 0\n self._prv_size = 0\n\n def refresh(self, cur_length):\n \"\"\" 刷新实时速度计。\"\"\"\n with self._lock:\n cur_time = time()\n prv = self._prv_time\n if prv is not None:\n incr_time = cur_time - prv\n speed = (cur_length - self._prv_size) / (incr_time or float('inf'))\n self._prv_time = cur_time\n self._prv_size = cur_length\n # 更新滑动平均数据\n self._moving_list.pop()\n self._moving_list.insert(0, speed)\n # 计算滑动平均数据\n self._speed = sum(self._moving_list) / self._depth\n\n def get_speed(self):\n return self._speed if not self.is_stopped() else 0\n\n\n\n","sub_path":"nbdler/downloader/struct/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":3681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
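A short usage sketch for the `RealtimeSpeed` meter above (the chunk sizes are invented; note that `refresh` expects the cumulative byte count, not the per-chunk delta):

```python
import time

speed = RealtimeSpeed(depth=8)
speed.start(0)                          # begin measuring from byte 0

downloaded = 0
for chunk in (512, 1024, 2048):         # pretend three chunks arrive
    time.sleep(0.1)
    downloaded += chunk
    speed.refresh(downloaded)           # cumulative size so far
    print(round(speed.get_speed()), "B/s")

speed.stop()
```

With `depth=8` the sliding window is still mostly zeros after three samples, so the reported speed ramps up gradually; that is the smoothing working as designed.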
+{"seq_id":"210281594","text":"\n#思路:树的下层节点的值一定大于等于上层节点,并且树的根节点是最小值,当树的节点值等于这个最小值时,则往下继续搜索,相当于所有节点组成的值集合中,除去树的根节点的值寻找一个最小值\n#\nclass TreeNode(object):\n\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\nclass Solution:\n\n\n\n def helper(self,root):\n self.ans = float('inf') #保存之前的节点\n min1 = root.val #根节点是最小的\n\n def dfs(node):\n if node:\n if min1 < node.val < self.ans: #如果当前节点大于最小的根节点并且小于之前保存的最小节点\n self.ans = node.val #更新第二小的节点数值 不需要往下遍历了,因为后面的肯定比该rnode.val大\n elif node.val == min1: #当前节点可最小节点相等,则继续往下遍历。\n dfs(node.left)\n dfs(node.right)\n\n dfs(root)\n return self.ans if self.ans < float('inf') else -1\n\n\n def helper2(self,root):\n one = root.val #one是这棵树的最小值\n self.two = float('inf') #正无穷\n def dfs(node):\n if not node:\n return\n if one < node.val < self.two:\n #大于根节点小于之前的最小值\n self.two = node.val\n elif node.val == one:\n #等于最小值 则往下遍历\n dfs(node.left)\n dfs(node.right)\n else:\n #大于self.two,则无需往下遍历,因为下层节点一定越来越大\n pass\n dfs(root)\n return self.two\n\n\n\n\n\n def helper3(self,root):\n\n def dfs(root):\n\n if not root.left:\n return float('inf')\n\n #如果左边的大于右边的,则第二小的值可能为由边\n if root.left.val>root.right.val:\n second = root.right.val\n third = root.left.val\n if second == root.val: #如果左边的等于root.val,则还得继续往下搜索,\n search = dfs(root.right) #找到右子树最小的 但可能这个最小的比之前兄弟节点上的值要大\n else:\n return second\n elif root.left.val < root.right.val:\n second = root.left.val\n third = root.right.val\n if second == root.val:\n search = dfs(root.left)\n else:\n return second\n else:\n return min(dfs(root.left),dfs(root.right))\n\n if search == root.val:\n return third\n second = min(search,third) #比较一边子树的最小值和兄弟节点最小值谁才是这课子树第二小的\n if second == root.val:\n return -1\n return second\n\n\n return dfs(root)\n\n\n\n\nroot = TreeNode(2)\nroot.left = TreeNode(2)\nroot.right = TreeNode(3)\nroot.right.left = TreeNode(3)\nroot.right.right = TreeNode(3)\nroot.left.left=TreeNode(2)\nroot.left.right = TreeNode(2)\ns = Solution()\nprint(s.helper(root))\nprint(s.helper2(root))\nprint(s.helper3(root))\nprint(min(2,2))\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"leetcode/671.py","file_name":"671.py","file_ext":"py","file_size_in_byte":3329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"316846217","text":"def survey_row_to_dict(survey_row):\n \"\"\"Converts a row of survey to dictionary\n\n extracts survey id, line number, x, and z coordinates\n \"\"\"\n row_list = survey_row.split(',')\n row_dict = {}\n row_dict['id'] = int(row_list[0])\n row_dict['line_number'] = int(row_list[1])\n row_dict['x'] = float(row_list[7])\n row_dict['z'] = float(row_list[9])\n return row_dict\n\n\nclass SurveyDict:\n def __init__(self):\n self.lines = {}\n\n def insert_row(self, survey_row):\n row_dict = survey_row_to_dict(survey_row)\n line_number = row_dict['line_number']\n value = row_dict['x'], row_dict['z']\n if line_number in self.lines:\n # The line number has already been added\n self.lines[line_number].append(value)\n else:\n # We need to create this line\n self.lines[line_number] = [value]\n\n\ndef test_survey_row_to_dict():\n import nose.tools\n\n test_row1='39,167,745,36.181844395,-75.750374342,901874.364,274562.126,87.850,467.130,2.106,-36.762,20120214,' \\\n '140811,50891.383'\n test_row2='39,163,745,36.182716687,-75.749096508,901986.082,274662.629,227.220,523.325,-2.423,-39.868,20120214,' \\\n '131725,47845.266'\n d1 = survey_row_to_dict(test_row1)\n test_d1 = {'id': 39, 'line_number': 167, 'x': 87.85, 'z': 2.106}\n nose.tools.eq_(d1, test_d1)\n\n d2 = survey_row_to_dict(test_row2)\n test_d2 = {'id': 39, 'line_number': 163, 'x': 227.22, 'z': -2.423}\n nose.tools.eq_(d2, test_d2)\n","sub_path":"survey.py","file_name":"survey.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"94187493","text":"import urllib.request as urllib\nimport json\nimport os\nimport time\nimport random\n\nprint(\"Opening JSON Data\")\n\ndef isValidCard(card):\n\treturn 'token' not in type_line \tand \\\n\t\t'emblem' not in type_line \t\tand \\\n\t\t'planeswalker' not in type_line and \\\n\t\t'scheme' not in type_line\t\tand \\\n\t\tcard['lang'] == 'en' \t\t\tand \\\n\t\t'/' not in card['name'] \t\tand \\\n\t\tnot card['promo']\n\t\t# card['border_color'] is 'black'\n\nwith open('scryfall-all-cards.json', encoding='utf-8') as json_file:\n\tdata = json.load(json_file)\n\n\tprint(\"JSON Data Loaded\")\n\n\t# print(data[0]['image_uris']['art_crop'])\n\t# print(data[2]['color_identity'])\n\n\tlength = len(data)\n\n\t# print( length + \" cards found!\")\n\n\tfor card in data:\n\n\t\t#print(\"Downloaded {progress} of {length}\".format(length=length, progress=))\n\n\t\ttype_line = card['type_line'].lower()\n\n\t\t# Only Black Border cards, We don't use art from emblems, tokens, planeswalkers, split cards pr Promos.\n\t\tif isValidCard(card):\n\n\t\t\tcard_color = card['color_identity']\n\n\n\t\t\tif(len(card_color) < 1):\n\t\t\t\tcard_color = \"colorless\"\n\t\t\telif (len(card_color) == 1):\n\t\t\t\tcard_color = card_color[0]\n\t\t\telse:\n\t\t\t\tcard_color = 'multi'\n\n\t\t\t# We don't deal in multi-colour cards\n\t\t\tif card_color is not 'multi':\n\n\t\t\t\t# Randomly assign the card as test or training data\n\t\t\t\t# rng = random.randint(0, 5)\n\t\t\t\ttest = 'test'\n\t\t\t\t# if rng > 4:\n\t\t\t\t# \ttest = 'validate'\n\n\t\t\t\t# for color_id in card_color_id:\n\n\t\t\t\timage_uri = card['image_uris']['art_crop']\n\n\t\t\t\tdirectory = 'mtg/{test}/{folder}'.format(folder=card_color, test=test)\n\n\t\t\t\tif not os.path.exists(directory):\n\t\t\t\t\tos.mkdir(directory)\n\n\t\t\t\tfile_name = \"{card_name} - {multiverse_id}\".format(card_name=card['name'], multiverse_id=card['id'])\n\n\t\t\t\tcard_file_name = \"{folder}/{card_name}.jpg\".format(card_name=file_name, folder=directory)\n\n\t\t\t\tif not os.path.isfile(card_file_name):\n\t\t\t\t\tprint(\"Downloading: \" + card['name'] + \" - \" + card['id'])\n\t\t\t\t\t# Add a small delay to respect Scryfalls rate limiting requests.\n\t\t\t\t\ttime.sleep(0.2)\n\t\t\t\t\timage = urllib.urlretrieve(image_uri, card_file_name)\n\t\t\t\t\tprint(\"Download Complete\")","sub_path":"download_images.py","file_name":"download_images.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"269974597","text":"#Make own linear regression\nfrom statistics import mean\nimport numpy as np, random\nimport matplotlib\nmatplotlib.use('TkAgg')\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import style\nstyle.use(\"fivethirtyeight\")\n\n#Setting data type to recheck on linear regression\n# xs = np.array([1, 2, 3, 4, 5, 6], dtype=np.float64)\n# ys = np.array([5, 4, 6, 5, 6, 7], dtype=np.float64)\n\n#Make best fit slope to return m\ndef best_fit_slope_and_break(xs, ys):\n #Based on formula\n m = ( (mean(xs)*mean(ys)) - mean(xs*ys) ) /\\\n ( mean(xs)**2 - mean(xs**2) )\n\n b = mean(ys) - (mean(xs) * m)\n\n #Getting back best slope\n return (m, b)\n\n#Get dataset\n#hm: no of info\n#variance\ndef create_dataset(hm, variance, step=2, correlation=False):\n\n val = 1\n ys = []\n\n #Set y values\n for i in range(hm):\n #Append random y\n y = val + random.randrange(-variance, variance)\n ys.append(y)\n\n if correlation and correlation == \"pos\":\n val += step\n\n if correlation and correlation == \"neg\":\n val -= step\n\n #Set xs\n xs = [i for i in range(len(ys))]\n\n return np.array(xs, dtype=np.float64), np.array(ys, dtype=np.float64)\n\n#Get squared error\n#Difference y original and y line\ndef squared_error(ys_orig, ys_line):\n return sum((ys_line - ys_orig)**2)\n\n#4Get coefficient determination\ndef coefficient_of_determination(ys_orig, ys_line):\n y_mean_line = [mean(ys_orig) for y in ys_orig]\n squared_error_orig = squared_error(ys_orig, ys_line)\n squared_error_y_mean = squared_error(ys_orig, y_mean_line)\n\n return (1 - (squared_error_orig / squared_error_y_mean))\n\nxs, ys = create_dataset(hm=40, variance=1099, step=2, correlation=False)\n\nm, b = best_fit_slope_and_break(xs=xs, ys=ys)\n\nprint(\"Mean is\", m, \" and B is\", b)\n\nregression_line = [(m*x)+b for x in xs]\n\npredict_x = 8\npredict_y = (m * predict_x) + b\nr_squared = coefficient_of_determination(ys_orig=ys, ys_line=regression_line)\n\nprint(\"Squared coefficient:\", r_squared)\n\nplt.scatter(xs, ys)\nplt.scatter(predict_x, predict_y, color=\"g\", s=100)\nplt.plot(xs, regression_line)\nplt.show()","sub_path":"Learning/Machine_Learning/Linear Regression/self_linear_regression.py","file_name":"self_linear_regression.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"438557851","text":"#!/usr/bin/env python\n\n#from __future__ import division\nimport sys\n\nprint(\"Trying to modify pythonpath\")\ncaffe_root = '/work/personal/caffe/'\nsys.path.insert(0, caffe_root + 'python')\n\nimport caffe\nimport numpy as np\n\nprint(\"Imported numpy and caffe\")\n\n# init\ncaffe.set_mode_gpu()\ncaffe.set_device(0)\n\n# caffe.set_mode_cpu()\n\nprint(\"Loading model into memory\")\nsolver = caffe.SGDSolver('solver.prototxt')\nsolver.net.copy_from('/work/personal/caffe/models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel')\n\nniter = 100000\ntrain_loss = np.zeros(niter)\ntrain_loss_bbox = np.zeros(niter)\n\nf = open('log.txt', 'w')\n\nprint(\"Starting iterations\")\nfor it in range(niter): \n solver.step(1)\n train_loss[it] = solver.net.blobs['loss_class'].data\n train_loss_bbox[it] = solver.net.blobs['loss_bbox'].data\n f.write('Class loss: %0.5f\\t\\tBbox loss: %.5f\\n' % (train_loss[it], train_loss_bbox[it]))\n #f.write('{0: f}\\n'.format(train_loss[it]))\nf.close()\n\nprint(\"Done with iterations\")\n\n# solver.step(80000)\n\n\n","sub_path":"Sec_7/solver_p.py","file_name":"solver_p.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"153878365","text":"import os\nfrom unittest import TestCase\n\nimport requests_mock\n\nfrom ZoneFileDownloader import ZoneFileDownloader\n\n\nclass TestZoneFileDownloader(TestCase):\n def setUp(self):\n self.config_data = {'download_path': '/en/download-zone-data/', 'tlds': {'game': '2601', 'auto': '2381'},\n 'base_url': 'https://czdap.icann.org', 'zone_data_path': 'zonedata',\n 'api_token': 'REPLACE_WITH_API_KEY'}\n\n self.download_urls = {\n 'game': 'https://czdap.icann.org/en/download-zone-data/2601?token={}'.format(self.config_data[\"api_token\"]),\n 'auto': 'https://czdap.icann.org/en/download-zone-data/2381?token={}'.format(self.config_data[\"api_token\"])}\n\n self.successful_zone_fetch = {\"Content-disposition\": \" attachment;\"}\n\n self.zone_data = {\"test\": \"thing\"}\n\n self.zone_file_downloader = ZoneFileDownloader(config_path=os.path.join(\"..\", \"config.yaml\"))\n\n def test_config_loads(self):\n \"\"\"Tests if the YAML config file is being parsed correctly\n and if all key:value pairs are present\n \"\"\"\n self.assertEqual(self.zone_file_downloader.config_data, self.config_data)\n\n def test_download_urls_built(self):\n \"\"\"Checks if the download URL for each zone file is created without\n errors\n \"\"\"\n self.zone_file_downloader.build_download_urls()\n self.assertEqual(self.zone_file_downloader.download_urls, self.download_urls)\n\n @requests_mock.mock()\n def test_fetch_zone_data(self, m):\n self.zone_file_downloader.build_download_urls()\n m.get(\"https://czdap.icann.org/en/download-zone-data/2601?token=REPLACE_WITH_API_KEY\",\n text='{\"Content-disposition\":\"attachment;\"}')\n m.get(\"https://czdap.icann.org/en/download-zone-data/2381?token=REPLACE_WITH_API_KEY\",\n text='{\"Content-disposition\":\"attachment;\"}')\n zone_data = self.zone_file_downloader.download_zone_files()\n for key in zone_data.keys():\n self.assertEqual(zone_data[key], '{\"Content-disposition\":\"attachment;\"}')\n","sub_path":"tests/test_zoneFileDownloader.py","file_name":"test_zoneFileDownloader.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"140492357","text":"# -*- coding: utf-8 -*-\r\nimport os\r\nimport time\r\n\r\n\r\nclass MyLog(object):\r\n def log(*args, **kwargs):\r\n time_format = '%y-%m-%d %H:%M:%S'\r\n value = time.localtime(int(time.time()))\r\n dt = time.strftime(time_format, value)\r\n with open(LOG_PATH, 'a', encoding='utf-8') as f:\r\n print(dt, *args, file=f, **kwargs)\r\n\r\n\r\n# 文件路径参数配置\r\n# 当前路径\r\nBASE_DIR = os.path.abspath(os.path.dirname(__file__))\r\n# 日志文件路径\r\nLOG_PATH = os.path.join(BASE_DIR, 'log.txt')\r\n# 项目图片路径\r\nIMAGE_DIR = os.path.join(BASE_DIR, 'images')\r\nif not os.path.exists(IMAGE_DIR):\r\n os.makedirs(IMAGE_DIR)\r\n# 验证码保存名称\r\nCAPTCHA_NAME = 'captcha.png'\r\n\r\n# 打码平台参数配置\r\n# 接口URL\r\nDYTRY_APIURL = 'http://api.dytry.com/ocr.json'\r\n# 用户名\r\nDYTRY_USERNAME = 'uruntest'\r\n# 用户密码\r\nDYTRY_PASSWORD = '0763!@#'\r\n# 题目类型\r\nDYTRY_TYPEID = 9999\r\n# 软件ID\r\nDYTRY_SOFTID = 1107\r\n# 软件KEY\r\nDYTRY_SOFTKEY = '34af19d2ee35e938dbbdc0336eb730cb'\r\n\r\n# 接口参数配置\r\n# 搜狗验证码识别接口\r\nGetCaptcha_URL = 'http://183.238.76.204:38015/GetCaptcha'\r\n\r\n# 识别验证码方法:1:打码平台,2:验证码识别接口\r\nGETCAPTCHA_TYPE = 2\r\n\r\n# 接口调用selenium次数超过100就重启selenium\r\nSELENIUM_MAX_TIME = 100\r\n","sub_path":"项目代码/windows代码/SougouWeixinAccountUrl/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"103295758","text":"from chainer.serializers import npz\nfrom chainer.training import extension\nfrom chainer.training.extensions import snapshot_writers\nfrom chainer.utils import argument\n\n\ndef snapshot_object(target, filename, savefun=None, **kwargs):\n \"\"\"snapshot_object(target, filename, savefun=None, \\\n*, condition=None, writer=None, snapshot_on_error=False)\n\n Returns a trainer extension to take snapshots of a given object.\n\n This extension serializes the given object and saves it to the output\n directory.\n\n This extension is called once per epoch by default. To take a\n snapshot at a different interval, a trigger object specifying the\n required interval can be passed along with this extension\n to the `extend()` method of the trainer.\n\n The default priority is -100, which is lower than that of most\n built-in extensions.\n\n Args:\n target: Object to serialize.\n filename (str): Name of the file into which the object is serialized.\n It can be a format string, where the trainer object is passed to\n the :meth:`str.format` method. For example,\n ``'snapshot_{.updater.iteration}'`` is converted to\n ``'snapshot_10000'`` at the 10,000th iteration.\n savefun: Function to save the object. It takes two arguments: the\n output file path and the object to serialize.\n condition: Condition object. It must be a callable object that returns\n boolean without any arguments. If it returns ``True``, the snapshot\n will be done.\n If not, it will be skipped. The default is a function that always\n returns ``True``.\n writer: Writer object.\n It must be a callable object.\n See below for the list of built-in writers.\n If ``savefun`` is other than ``None``, this argument must be\n ``None``. In that case, a\n :class:`~chainer.training.extensions.snapshot_writers.SimpleWriter`\n object instantiated with specified ``savefun`` argument will be\n used.\n snapshot_on_error (bool): Whether to take a snapshot in case trainer\n loop has been failed.\n\n Returns:\n Snapshot extension object.\n\n .. seealso::\n\n - :meth:`chainer.training.extensions.snapshot`\n \"\"\"\n\n return snapshot(target=target, filename=filename, savefun=savefun,\n **kwargs)\n\n\ndef snapshot(savefun=None,\n filename='snapshot_iter_{.updater.iteration}', **kwargs):\n \"\"\"snapshot(savefun=None, filename='snapshot_iter_{.updater.iteration}', \\\n*, target=None, condition=None, writer=None, snapshot_on_error=False)\n\n Returns a trainer extension to take snapshots of the trainer.\n\n This extension serializes the trainer object and saves it to the output\n directory. It is used to support resuming the training loop from the saved\n state.\n\n This extension is called once per epoch by default. To take a\n snapshot at a different interval, a trigger object specifying the\n required interval can be passed along with this extension\n to the `extend()` method of the trainer.\n\n The default priority is -100, which is lower than that of most\n built-in extensions.\n\n .. note::\n This extension first writes the serialized object to a temporary file\n and then rename it to the target file name. Thus, if the program stops\n right before the renaming, the temporary file might be left in the\n output directory.\n\n Args:\n savefun: Function to save the trainer. 
It takes two arguments: the\n output file path and the trainer object.\n It is :meth:`chainer.serializers.save_npz` by default.\n If ``writer`` is specified, this argument must be ``None``.\n filename (str): Name of the file into which the trainer is serialized.\n It can be a format string, where the trainer object is passed to\n the :meth:`str.format` method.\n target: Object to serialize. If it is not specified, it will\n be the trainer object.\n condition: Condition object. It must be a callable object that returns\n boolean without any arguments. If it returns ``True``, the snapshot\n will be done.\n If not, it will be skipped. The default is a function that always\n returns ``True``.\n writer: Writer object.\n It must be a callable object.\n See below for the list of built-in writers.\n If ``savefun`` is other than ``None``, this argument must be\n ``None``. In that case, a\n :class:`~chainer.training.extensions.snapshot_writers.SimpleWriter`\n object instantiated with specified ``savefun`` argument will be\n used.\n snapshot_on_error (bool): Whether to take a snapshot in case trainer\n loop has been failed.\n\n Returns:\n Snapshot extension object.\n\n .. testcode::\n :hide:\n\n from chainer import training\n class Model(chainer.Link):\n def __call__(self, x):\n return x\n train_iter = chainer.iterators.SerialIterator([], 1)\n optimizer = optimizers.SGD().setup(Model())\n updater = training.updaters.StandardUpdater(\n train_iter, optimizer, device=0)\n trainer = training.Trainer(updater)\n\n .. admonition:: Using asynchronous writers\n\n By specifying ``writer`` argument, writing operations can be made\n asynchronous, hiding I/O overhead of snapshots.\n\n >>> from chainer.training import extensions\n >>> writer = extensions.snapshot_writers.ProcessWriter()\n >>> trainer.extend(extensions.snapshot(writer=writer), \\\ntrigger=(1, 'epoch'))\n\n To change the format, such as npz or hdf5, you can pass a saving\n function as ``savefun`` argument of the writer.\n\n >>> from chainer.training import extensions\n >>> from chainer import serializers\n >>> writer = extensions.snapshot_writers.ProcessWriter(\n ... savefun=serializers.save_npz)\n >>> trainer.extend(extensions.snapshot(writer=writer), \\\ntrigger=(1, 'epoch'))\n\n This is the list of built-in snapshot writers.\n\n - :class:`chainer.training.extensions.snapshot_writers.SimpleWriter`\n - :class:`chainer.training.extensions.snapshot_writers.ThreadWriter`\n - :class:`chainer.training.extensions.snapshot_writers.ProcessWriter`\n - :class:`chainer.training.extensions.snapshot_writers.\\\nThreadQueueWriter`\n - :class:`chainer.training.extensions.snapshot_writers.\\\nProcessQueueWriter`\n\n .. 
seealso::\n\n - :meth:`chainer.training.extensions.snapshot_object`\n \"\"\"\n target, condition, writer, snapshot_on_error = argument.parse_kwargs(\n kwargs,\n ('target', None), ('condition', None), ('writer', None),\n ('snapshot_on_error', False))\n argument.assert_kwargs_empty(kwargs)\n\n if savefun is not None and writer is not None:\n raise TypeError(\n 'savefun and writer arguments cannot be specified together.')\n\n if writer is None:\n if savefun is None:\n savefun = npz.save_npz\n writer = snapshot_writers.SimpleWriter(savefun=savefun)\n\n return _Snapshot(\n target=target, condition=condition, writer=writer, filename=filename,\n snapshot_on_error=snapshot_on_error)\n\n\ndef _always_true():\n return True\n\n\nclass _Snapshot(extension.Extension):\n \"\"\"Trainer extension to take snapshots.\n\n This extension serializes the given object and saves it to the output\n directory.\n\n This extension is called once per epoch by default. To take a\n snapshot at a different interval, a trigger object specifying the\n required interval can be passed along with this extension\n to the `extend()` method of the trainer.\n\n The default priority is -100, which is lower than that of most\n built-in extensions.\n \"\"\"\n trigger = 1, 'epoch'\n priority = -100\n\n def __init__(\n self, target=None, condition=None, writer=None,\n filename='snapshot_iter_{.updater.iteration}',\n snapshot_on_error=False):\n if condition is None:\n condition = _always_true\n if writer is None:\n writer = snapshot_writers.SimpleWriter()\n self._target = target\n self.filename = filename\n self.condition = condition\n self.writer = writer\n self._snapshot_on_error = snapshot_on_error\n\n def on_error(self, trainer, exc, tb):\n super(_Snapshot, self).on_error(trainer, exc, tb)\n if self._snapshot_on_error:\n self._make_snapshot(trainer)\n\n def __call__(self, trainer):\n if self.condition():\n self._make_snapshot(trainer)\n\n def _make_snapshot(self, trainer):\n target = trainer if self._target is None else self._target\n serialized_target = npz.serialize(target)\n filename = self.filename\n if callable(filename):\n filename = filename(trainer)\n else:\n filename = filename.format(trainer)\n outdir = trainer.out\n self.writer(filename, outdir, serialized_target)\n\n def finalize(self):\n if hasattr(self.writer, 'finalize'):\n self.writer.finalize()\n","sub_path":"chainer/training/extensions/_snapshot.py","file_name":"_snapshot.py","file_ext":"py","file_size_in_byte":9292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"33"}
+{"seq_id":"156277569","text":"from tkinter import *\nimport os\nimport cx_Oracle\nimport ocr_text_extraction\nimport database_functions\n\n\ndef delete_screen6():\n screen6.destroy()\n\n\ndef print_bill():\n global screen7\n screen7 = Toplevel(screen)\n screen7.title(\"Print Bill\")\n screen7.geometry(\"150x100\")\n Label(screen7, text=\"Connect with a printer. Bill will be printed\")\n\n\ndef login_success():\n global screen3\n screen3 = Toplevel(screen)\n screen3.title(\"Successful Registration\")\n screen3.geometry(\"1000x750\")\n\n Label(screen3, text=\"Log In Success\").pack()\n Label(screen3, text=\"\").pack()\n Label(screen3, text=\"\").pack()\n Button(screen3, text=\"Scan Number Plate\", width=\"20\", height=\"2\", command=dashboard).pack()\n\n\ndef wrong_password():\n global screen4\n screen4 = Toplevel(screen)\n screen4.title(\"Wrong Password\")\n screen4.geometry(\"1000x750\")\n\n Label(screen4, text=\"Wrong Information\", fg=\"red\").pack()\n Label(screen4, text=\"\").pack()\n Label(screen4, text=\"\").pack()\n Button(screen4, text=\"Try Again!\", fg=\"red\", width=\"20\", height=\"2\", command=login).pack()\n\n\ndef reg_user():\n brdg_info = brdg_id.get()\n tll_info = toll_id.get()\n bth_info = bth_id.get()\n\n c = cx_Oracle.connect('spl2/spl2@localhost/SYSTEM', encoding='UTF-8', nencoding='UTF-8')\n try:\n curs = c.cursor()\n curs.callproc(\"BOOTH_MANAGER_REG\", [brdg_info, tll_info, bth_info])\n except cx_Oracle.DatabaseError as ex:\n err, = ex.args\n print(\"Error code = \", err.code)\n print(\"Error Message = \", err.message)\n os._exit(1)\n\n brdg_id_entry.delete(0, END)\n toll_id_entry.delete(0, END)\n bth_id_entry.delete(0, END)\n\n Label(screen1, text=\"\\n\\n You have successfully completed your registration\", fg=\"green\",\n font=(\"Times New Roman\", 13)).pack()\n\n\ndef register():\n global screen1\n screen1 = Toplevel(screen)\n screen1.title(\"Register\")\n screen1.geometry(\"1000x750\")\n\n global brdg_id\n global toll_id\n global bth_id\n global brdg_id_entry\n global toll_id_entry\n global bth_id_entry\n\n brdg_id = StringVar()\n toll_id = StringVar()\n bth_id = StringVar()\n\n Label(screen1, text=\"Please enter your details\").pack()\n Label(screen1, text=\"\").pack()\n\n global username_entry\n global mail_entry\n global password_entry\n\n Label(screen1, text=\"Bridge ID\").pack()\n brdg_id_entry = Entry(screen1, textvariable=brdg_id)\n brdg_id_entry.pack()\n\n Label(screen1, text=\"Toll Center ID \").pack()\n toll_id_entry = Entry(screen1, textvariable=toll_id)\n toll_id_entry.pack()\n\n Label(screen1, text=\"Booth_ID\").pack()\n bth_id_entry = Entry(screen1, textvariable=bth_id)\n bth_id_entry.pack()\n\n Label(screen1, text=\"\").pack()\n Button(screen1, text=\"Complete Registration\", width=\"20\", height=\"2\", command=reg_user).pack()\n\n\ndef login():\n global screen2\n screen2 = Toplevel(screen)\n screen2.title(\"Log In\")\n screen2.geometry(\"1000x750\")\n\n Label(screen2, text=\"Please enter your details to login\").pack()\n Label(screen2, text=\"\").pack()\n\n global bridge_id_verify\n global bridge_id_entry1\n global tool_plaza_id_verify\n global tool_plaza_id_entry1\n global booth_id_verify\n global booth_id_entry1\n\n bridge_id_verify = StringVar()\n tool_plaza_id_verify = StringVar()\n booth_id_verify = StringVar()\n\n Label(screen2, text=\"Bridge ID\").pack()\n bridge_id_entry1 = Entry(screen2, textvariable=bridge_id_verify)\n bridge_id_entry1.pack()\n\n Label(screen2, text=\"\").pack()\n\n Label(screen2, text=\"Toll Plaza ID\").pack()\n 
tool_plaza_id_entry1 = Entry(screen2, textvariable=tool_plaza_id_verify)\n tool_plaza_id_entry1.pack()\n\n Label(screen2, text=\"\").pack()\n\n Label(screen2, text=\"Booth ID\").pack()\n booth_id_entry1 = Entry(screen2, textvariable=booth_id_verify)\n booth_id_entry1.pack()\n\n Label(screen2, text=\"\").pack()\n Button(screen2, text=\"Log In\", width=\"30\", height=\"2\", command=login_verify).pack()\n\n\ndef login_verify():\n c = cx_Oracle.connect('spl2/spl2@localhost/SYSTEM', encoding='UTF-8', nencoding='UTF-8')\n try:\n curs = c.cursor()\n cnt = curs.callfunc(\"BOOTH_MANAGER_LOG_IN\", cx_Oracle.NUMBER, [bridge_id_verify.get(), booth_id_verify.get(),\n tool_plaza_id_verify.get()])\n print(cnt)\n if cnt == 1.0:\n login_success()\n else:\n wrong_password()\n\n except cx_Oracle.DatabaseError as ex:\n err, = ex.args\n print(\"Error code = \", err.code)\n print(\"Error Message = \", err.message)\n os._exit(1)\n\n\ndef dashboard():\n global screen6\n screen6 = Toplevel(screen)\n screen6.title(\"Dashboard\")\n screen6.geometry(\"1000x750\")\n\n car_no = ocr_text_extraction.ocr_demo()\n owner = database_functions.get_car_owner(car_no)\n pre_balance = database_functions.get_balance(car_no)\n toll_amount = database_functions.get_toll_amount(car_no)\n updated_balance = pre_balance - toll_amount\n database_functions.update_balance(car_no, updated_balance)\n status = database_functions.car_pass(car_no)\n database_functions.event_log_update(car_no, bridge_id_verify.get(), booth_id_verify.get())\n\n Label(screen6, text=\"Welcome to TOLL TOOL\").pack()\n Label(screen6, text=\"\").pack()\n Label(screen6, text=\"Car no : \" + car_no).pack()\n Label(screen6, text=\"\").pack()\n Label(screen6, text=\"Car Owner : \" + owner).pack()\n Label(screen6, text=\"\").pack()\n Label(screen6, text=\"Total Balance : \" + str(pre_balance)).pack()\n Label(screen6, text=\"\").pack()\n Label(screen6, text=\"Toll Amount : \" + str(toll_amount)).pack()\n Label(screen6, text=\"\").pack()\n Label(screen6, text=\"Remaining Balance : \" + str(updated_balance)).pack()\n Label(screen6, text=\"\").pack()\n Label(screen6, text=\"Car Status : \" + status).pack()\n Label(screen6, text=\"\").pack()\n Button(screen6, text=\"Print Bill\", command=print_bill).pack()\n Label(screen6, text=\"\").pack()\n Button(screen6, text=\"Log Out\", command=delete_screen6).pack()\n\n\ndef main_screen():\n global screen\n screen = Tk()\n screen.geometry(\"1000x750\")\n screen.title(\"Toll Tool\")\n Label(text=\"Toll Tool Officer\", bg=\"grey\", width=\"300\", height=\"2\", font=(\"Times New Roman\", 13)).pack()\n Label(text=\"\").pack()\n Button(text=\"Login\", width=\"30\", height=\"2\", command=login).pack()\n ##Button(text=\"Login\", width=\"30\", height=\"2\", command=screen.destroy).pack()\n Label(text=\"\").pack()\n Button(text=\"Register\", width=\"30\", height=\"2\", command=register).pack()\n\n screen.mainloop()\n\n\nmain_screen()\n","sub_path":"Toll_Tool/TollOfficer.py","file_name":"TollOfficer.py","file_ext":"py","file_size_in_byte":6658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"241249922","text":"from django.test import TestCase, override_settings\n\nimport responses\n\nfrom ...ciim.tests.factories import create_response, create_media\nfrom ..models import Image\n\n\n@override_settings(\n KONG_CLIENT_BASE_URL=\"https://kong.test\",\n KONG_IMAGE_PREVIEW_BASE_URL=\"https://media.preview/\",\n)\nclass ImageTestCase(TestCase):\n @responses.activate\n def test_thumbnail_url(self):\n responses.add(\n responses.GET,\n \"https://kong.test/data/search\",\n json=create_response(\n records=[\n create_media(\n thumbnail_location=\"path/to/thumbnail.jpeg\",\n location=\"path/to/image.jpeg\",\n ),\n ]\n ),\n )\n\n images = Image.search.filter(rid=\"\")\n image = images[0]\n\n self.assertEquals(\n image.thumbnail_url, \"https://media.preview/path/to/thumbnail.jpeg\"\n )\n\n @responses.activate\n def test_thumbnail_url_fallback(self):\n responses.add(\n responses.GET,\n \"https://kong.test/data/search\",\n json=create_response(\n records=[\n create_media(\n thumbnail_location=None, location=\"path/to/image.jpeg\"\n ),\n ]\n ),\n )\n\n images = Image.search.filter(rid=\"\")\n image = images[0]\n\n # Fallback serves image through Wagtail instead of from kong\n self.assertEquals(image.thumbnail_url, \"/records/image/path/to/image.jpeg\")\n","sub_path":"etna/records/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"339050543","text":"# # LIBRERIAS\nimport pandas as pd\nimport logging, re, sys, os\nimport numpy as np\n#import infoANDJE.utils.Leer_texto as Leer_texto\nimport Leer_texto as leer\nimport pickle\nfrom optparse import OptionParser\nfrom time import time\nfrom nltk.corpus import stopwords\nimport matplotlib.pyplot as plt\n\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import cross_validate\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.feature_selection import SelectKBest, f_classif, mutual_info_classif\nfrom sklearn.externals import joblib\nfrom sklearn.datasets import fetch_20newsgroups\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.feature_extraction.text import HashingVectorizer\nfrom sklearn.linear_model import RidgeClassifier\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.svm import LinearSVC\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.linear_model import Perceptron\nfrom sklearn.linear_model import PassiveAggressiveClassifier\nfrom sklearn.naive_bayes import BernoulliNB, MultinomialNB\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.neighbors import NearestCentroid\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.utils.extmath import density\nfrom sklearn import metrics\nfrom sklearn.model_selection import train_test_split\nfrom scipy.sparse import coo_matrix, vstack\nfrom scipy.stats import norm\n\ndef hacerMuestra(dataSet, clasificador,p = 0.5, e = 0.05, alpha = 0.05):\n # Exclusion de los proceso usados para el clasificador\n isNew = ~dataSet.datos.ID_DEL_PROCESO.isin(clasificador.dataSet.datos.ID_DEL_PROCESO)\n dataSet.datos = dataSet.datos.loc[isNew, :] \n # calculando muestra\n N = dataSet.datos.shape[0]\n Z = norm.ppf(1- alpha/2)\n n = (N * Z**2 * p * (1-p)) / ((N-1) * e**2 + Z**2 * p * (1-p))\n print('Se calculo una muestra de tamano %d de un total de %d' % (n, N))\n samMue = dataSet.datos.sample(n = round(n, 0), replace=False)\n isMuestra = dataSet.datos.ID_DEL_PROCESO.isin(samMue.ID_DEL_PROCESO.values)\n dataSet.datos['IND_MUESTRA'] = 0\n dataSet.datos.loc[isMuestra, 'IND_MUESTRA'] = 1\n return(dataSet)\n\n# # Graficas de reporte del mejor metodo\ndef show_values(pc, fmt=\"%.2f\", **kw):\n '''\n Heatmap with text in each cell with matplotlib's pyplot\n Source: http://stackoverflow.com/a/25074150/395857 \n By HYRY\n '''\n from itertools import izip\n pc.update_scalarmappable()\n ax = pc.get_axes()\n for p, color, value in izip(pc.get_paths(), pc.get_facecolors(), pc.get_array()):\n x, y = p.vertices[:-2, :].mean(0)\n if np.all(color[:3] > 0.5):\n color = (0.0, 0.0, 0.0)\n else:\n color = (1.0, 1.0, 1.0)\n ax.text(x, y, fmt % value, ha=\"center\", va=\"center\", color=color, **kw)\n\n\ndef cm2inch(*tupl):\n '''\n Specify figure size in centimeter in matplotlib\n Source: http://stackoverflow.com/a/22787457/395857\n By gns-ank\n '''\n inch = 2.54\n if type(tupl[0]) == tuple:\n return tuple(i/inch for i in tupl[0])\n else:\n return tuple(i/inch for i in tupl)\n\n\ndef heatmap(AUC, title, xlabel, ylabel, xticklabels, yticklabels, figure_width=40, \n figure_height=20, correct_orientation=False, cmap='RdBu', fileOut = 'figura_Clases.png'):\n '''\n Inspired by:\n - http://stackoverflow.com/a/16124677/395857 \n - http://stackoverflow.com/a/25074150/395857\n '''\n\n # Plot it out\n fig, ax = plt.subplots() \n #c = ax.pcolor(AUC, edgecolors='k', linestyle= 'dashed', 
linewidths=0.2, cmap='RdBu', vmin=0.0, vmax=1.0)\n c = ax.pcolor(AUC, edgecolors='k', linestyle= 'dashed', linewidths=0.2, cmap=cmap)\n\n # put the major ticks at the middle of each cell\n ax.set_yticks(np.arange(AUC.shape[0]) + 0.5, minor=False)\n ax.set_xticks(np.arange(AUC.shape[1]) + 0.5, minor=False)\n\n # set tick labels\n #ax.set_xticklabels(np.arange(1,AUC.shape[1]+1), minor=False)\n ax.set_xticklabels(xticklabels, minor=False)\n ax.set_yticklabels(yticklabels, minor=False)\n\n # set title and x/y labels\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel) \n\n # Remove last blank column\n plt.xlim( (0, AUC.shape[1]) )\n\n # Turn off all the ticks\n ax = plt.gca() \n for t in ax.xaxis.get_major_ticks():\n t.tick1On = False\n t.tick2On = False\n for t in ax.yaxis.get_major_ticks():\n t.tick1On = False\n t.tick2On = False\n\n # Add color bar\n plt.colorbar(c)\n\n # Add text in each cell \n show_values(c)\n\n # Proper orientation (origin at the top left instead of bottom left)\n if correct_orientation:\n ax.invert_yaxis()\n ax.xaxis.tick_top() \n\n # resize \n fig = plt.gcf()\n #fig.set_size_inches(cm2inch(40, 20))\n #fig.set_size_inches(cm2inch(40*4, 20*4))\n fig.set_size_inches(cm2inch(figure_width, figure_height))\n plt.savefig(fileOut, bbox_inches='tight')\n\ndef split_classification_report(classification_report):\n lines = classification_report.split('\\n')\n classes_res = []\n for line in lines[2 : (len(lines) - 2)]:\n t = line.strip().split()\n t = ['_'.join(t[0:(len(t) - 5 + 1)])] + t[(len(t) - 5 + 1):(len(t))]\n if len(t) < 2: continue\n classes_res.append(t)\n return(pd.DataFrame(classes_res, columns = ['Group', 'Precision', 'Recall', 'F1-score', 'support']))\n\ndef plot_classification_report(classification_report, fileOut, title='Classification report ', cmap='RdBu'):\n '''\n Plot scikit-learn classification report.\n Extension based on http://stackoverflow.com/a/31689645/395857 \n '''\n lines = classification_report.split('\\n')\n\n classes = []\n plotMat = []\n support = []\n class_names = []\n for line in lines[2 : (len(lines) - 2)]:\n t = line.strip().split()\n t = ['_'.join(t[0:(len(t) - 5 + 1)])] + t[(len(t) - 5 + 1):(len(t))]\n if len(t) < 2: continue\n classes.append(t[0])\n v = [float(x) for x in t[1: len(t) - 1]]\n support.append(int(t[-1]))\n class_names.append(t[0])\n #print(v)\n plotMat.append(v)\n\n print('plotMat: {0}'.format(plotMat))\n print('support: {0}'.format(support))\n\n xlabel = 'Metrics'\n ylabel = 'Classes'\n xticklabels = ['Precision', 'Recall', 'F1-score']\n yticklabels = ['{0} ({1})'.format(class_names[idx], sup) for idx, sup in enumerate(support)]\n figure_width = 25\n figure_height = len(class_names) + 7\n correct_orientation = False\n heatmap(np.array(plotMat), title, xlabel, ylabel, xticklabels, yticklabels, \n figure_width, figure_height, correct_orientation, cmap=cmap, fileOut = fileOut)\n\ndef trim(s):\n \"\"\"Trim string to fit on terminal (assuming 80-column display)\"\"\"\n return s if len(s) <= 80 else s[:77] + \"...\"\n\n####################################################################################\n# # Entrenando varios clasificadores\n####################################################################################\ndef clasificadorRest(text, pathVectorizer, pathCLF):\n #text = allNulidad.datos.loc[2, 'texto']\n #pathCLF = './Output/clasificadores/LinearSVC_with_L2-based_Clasificadores_32.pkl'\n #pathVectorizer = './Output/vectorizer.pk'\n # # Lectura de insumos\n clasificador = joblib.load(pathCLF)\n 
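# Added note: the loaded .pkl may be a bare estimator or a sklearn Pipeline; when a\n    # 'feature_selection' step is present it must transform x before predicting (done below).\n    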
cachedStopWords = stopwords.words(\"spanish\")\n clasificador.named_steps['feature_selection']\n with open(pathVectorizer, 'rb') as fin:\n vectorizer = pickle.load(fin)\n # # vectorizacion y seleccion de caracteristicas\n x = vectorizer.transform(map(lambda x: x.translate(None, '0123456789'), text))\n feature_names = vectorizer.get_feature_names() \n if 'feature_selection' in clasificador.named_steps.keys():\n x = clasificador.named_steps['feature_selection'].transform(x) \n # # clasificacion\n t0 = time() \n pred = clasificador.named_steps['classification'].predict(x)\n test_time = time() - t0\n print(\"tiempo de prediccion: %0.3fs\" % test_time)\n return(pred) \n\n\nclass clasificador:\n def __init__(self, dataSet, nameOut, yCol = 'CAUSA_DE_LA_DEMANDA'):\n # # Lectura de textos\n self.yCol = yCol\n self.clasificador = None\n self.dataSet = dataSet\n self.cachedStopWords = stopwords.words(\"spanish\")\n self.nameOut = nameOut\n self.results = []\n self.classes_results = pd.DataFrame()\n self.SCscore = {}\n self.nameVectorizer = 'vectorizer_' + nameOut + '.pkl'\n print(\"... Lectura de base principal de tamano;....\")\n print(self.dataSet.datos.shape)\n self.X_train_text, self.X_test_text, self.y_train, self.y_test = train_test_split(self.dataSet.datos, \n self.dataSet.datos[self.yCol], \n test_size=0.33, random_state=42)\n self.target_names = None#np.unique(self.y_train) \n \n def searchMDF(self, minSD = 3, numPart = 10):\n if self.clasificador is None:\n self.setMethod()\n clf = self.clasificador['clf'] \n # # Calcular DF\n vectorizer = CountVectorizer(stop_words = self.cachedStopWords, tokenizer = leer.preprocess_text)\n X = vectorizer.fit_transform(map(lambda x: x.translate(None, '0123456789'), self.dataSet.datos.texto))\n print(\"Si tomo el cambio\")\n X[X.nonzero()] = 1\n dfVec = np.ravel(X.sum(axis=0) / float(X.shape[0]))\n # # Encontrar particiones\n liDF = np.mean(dfVec) + minSD * np.std(dfVec)\n lsDF = np.max(dfVec)\n vecDF = np.arange(liDF, lsDF, (lsDF - liDF) / numPart) \n\n self.results = []\n for maxDF in vecDF:\n self.makeVectorizer(maxDF)\n name = self.clasificador['clf_descr'] + '_' + str(maxDF)\n self.results.append(self.benchmark(clf, name))\n \n # # Seleccionando la mejor transformacion\n bestDF = vecDF[np.argmax(map(lambda x: x['score'], self.results))]\n self.makeVectorizer(bestDF)\n\n def makeVectorizer(self, maxDF, minDF = 2, wEliminar = None, outPath = \"Output\"):\n fileVectorizer = os.path.join(outPath, self.nameVectorizer)\n if not os.path.exists(outPath):\n os.makedirs(outPath)\n if os.path.exists(fileVectorizer):\n with open(fileVectorizer, 'rb') as fin:\n self.vectorizer = pickle.load(fin)\n self.X_train = pickle.load(fin)\n self.X_test = pickle.load(fin)\n self.feature_names = self.vectorizer.get_feature_names()\n else:\n if not wEliminar is None:\n wEliminar = self.cachedStopWords + wEliminar\n else:\n wEliminar = self.cachedStopWords\n print(maxDF)\n print(minDF)\n self.vectorizer = TfidfVectorizer(sublinear_tf = True, max_df = maxDF, min_df = minDF,\n stop_words = wEliminar, tokenizer = leer.preprocess_text)\n self.X_train = self.vectorizer.fit_transform(map(lambda x: x.translate(None, '0123456789'),\n self.X_train_text.texto.values))\n self.X_test = self.vectorizer.transform(map(lambda x: x.translate(None, '0123456789'), \n self.X_test_text.texto.values))\n self.feature_names = self.vectorizer.get_feature_names()\n with open(fileVectorizer, 'wb') as fin:\n pickle.dump(self.vectorizer, fin)\n pickle.dump(self.X_train, fin)\n pickle.dump(self.X_test, 
fin)\n    \n    def cargar(self, outPath = \"Output\"):\n        # # Load the vectorizer\n        self.makeVectorizer(maxDF = 0.99) \n        print(\"Listo self.datos de entrenamiento..... n_samples: %d, n_features: %d\" % self.X_train.shape)\n        print(\"Listo self.datos de prueba............ n_samples: %d, n_features: %d\" % self.X_test.shape)\n        \n        # # Load the saved classifiers\n        outDir = os.path.join(outPath, \"clasificadores\")\n        for ii, jj, zz in os.walk(outDir):\n            for file_clf in zz:\n                if re.match('.+_' + self.nameOut + '.pkl', file_clf):\n                    name = re.sub(\"(.+)_\" + self.nameOut + '.pkl', \"\\\\1\", file_clf)\n                    clf = joblib.load(os.path.join(ii, file_clf))\n                    t0 = time()\n                    if name == \"Random forest\":\n                        pred = clf.predict(self.X_test.toarray())\n                    else:\n                        pred = clf.predict(self.X_test)\n                    test_time = time() - t0\n                    score = metrics.accuracy_score(self.y_test, pred)\n                    print(\"accuracy: %0.3f\" % score)\n                    self.results.append({'clf_descr' : name, 'score' : score,\n                                         'train_time' : 0, 'test_time' : test_time,\n                                         'file_clf' : file_clf})\n    \n    def benchmark(self, clf, name = \"\", flagMap = False, outPath = \"Output\"):\n        # # CV score\n        auxNCLF = re.sub('(\\\\s)', '_', name)\n        #self.SCscore[auxNCLF] = cross_validate(clf, vstack([self.X_test, self.X_train]),\n        #                                       self.y_test.append(self.y_train), cv=10)\n        print('_' * 80)\n        print(\"Training: \")\n        print(clf)\n        t0 = time()\n        if name == \"Random forest\":\n            clf.fit(self.X_train.toarray(), self.y_train)\n        else:\n            clf.fit(self.X_train, self.y_train)\n        train_time = time() - t0\n        print(\"train time: %0.3fs\" % train_time)\n        t0 = time()\n        \n        if name == \"Random forest\":\n            pred = clf.predict(self.X_test.toarray())\n        else:\n            pred = clf.predict(self.X_test)\n        test_time = time() - t0\n        print(\"test time: %0.3fs\" % test_time)\n        score = metrics.accuracy_score(self.y_test, pred)\n        print(\"accuracy: %0.3f\" % score)\n        \n        if hasattr(clf, 'coef_'):\n            print(\"dimensionality: %d\" % clf.coef_.shape[1])\n            print(\"density: %f\" % density(clf.coef_))\n            #print(\"top 10 keywords per class:\")\n            #for i, label in enumerate(target_names):\n                #top10 = np.argsort(clf.coef_[i])[-10:]\n                #print(trim(\"%s: %s\" % (label, \" \".join(self.feature_names[top10]))))\n            #print()\n        print(\"classification report:\")\n        clf_rep = metrics.classification_report(self.y_test, pred,\n                                                target_names=self.target_names)\n        print(clf_rep)\n\n        # the output directory must exist before any figure or model file is written below\n        outDir = os.path.join(outPath, \"clasificadores\")\n        if not os.path.exists(outDir):\n            os.makedirs(outDir)\n\n        if flagMap:\n            file_clf = os.path.join(outDir, 'fgCla_' + re.sub('(\\\\s)', '_', name) + '.png')\n            plot_classification_report(clf_rep, file_clf)\n\n        clf_rep = split_classification_report(clf_rep)\n        clf_rep['Method'] = name\n        self.classes_results = pd.concat([self.classes_results, clf_rep])\n        \n        print(\"confusion matrix:\")\n        print(metrics.confusion_matrix(self.y_test, pred))\n\n        file_clf = os.path.join(outDir, re.sub('(\\\\s)', '_', name) + '_' + self.nameOut + '.pkl')\n        print(file_clf)\n        joblib.dump(clf, file_clf)\n        \n        return {'clf_descr' : name, 'score' : score,\n                'train_time' : train_time, 'test_time' : test_time,\n                'file_clf' : file_clf}\n\n    def entrenar(self, maxDF = 0.5, minDF = 2, wEliminar = None):\n        self.wEliminar = wEliminar\n        self.makeVectorizer(maxDF, minDF, wEliminar) \n        print(\"Listo self.datos de entrenamiento..... n_samples: %d, n_features: %d\" % self.X_train.shape)\n        print(\"Listo self.datos de prueba............ 
n_samples: %d, n_features: %d\" % self.X_test.shape)\n \n for clf, name in (\n (RidgeClassifier(tol=1e-2, solver=\"lsqr\"), \"Ridge Classifier\"),\n (Perceptron(n_iter=50), \"Perceptron\"),\n (PassiveAggressiveClassifier(n_iter=50), \"Passive-Aggressive\"),\n #(KNeighborsClassifier(n_neighbors=10), \"kNN\"),\n #(RandomForestClassifier(n_estimators=100), \"Random forest\")\n ):\n print('=' * 80)\n print(name)\n self.results.append(self.benchmark(clf, name))\n for penalty in [\"l2\", \"l1\"]:\n print('=' * 80)\n print(\"%s penalty\" % penalty.upper())\n # Train Liblinear model\n self.results.append(self.benchmark(LinearSVC(penalty=penalty,\n dual=False, tol=1e-3), \"LinearSVC \" + penalty))\n # Train SGD model\n self.results.append(self.benchmark(SGDClassifier(alpha=.0001, n_iter=50,\n penalty=penalty), \"SGD\"))\n # Train SGD with Elastic Net penalty\n print('=' * 80)\n print(\"Elastic-Net penalty\")\n self.results.append(self.benchmark(SGDClassifier(alpha=.0001, n_iter=50,\n penalty=\"elasticnet\"), \"SGD Elastic\"))\n\n # Train NearestCentroid without threshold\n print('=' * 80)\n print(\"NearestCentroid (aka Rocchio classifier)\")\n self.results.append(self.benchmark(NearestCentroid(), \"NearestCentroid\"))\n \n # Train sparse Naive Bayes classifiers\n print('=' * 80)\n print(\"Naive Bayes\")\n self.results.append(self.benchmark(MultinomialNB(alpha=.01), \"Multinomial NB\"))\n self.results.append(self.benchmark(BernoulliNB(alpha=.01), \"Bernoulli NB\"))\n \n print('=' * 80)\n print(\"LinearSVC with L1-based feature selection\")\n # The smaller C, the stronger the regularization.\n # The more regularization, the more sparsity.\n self.results.append(self.benchmark(Pipeline([\n ('feature_selection', SelectFromModel(LinearSVC(penalty=\"l2\", dual=False,\n tol=1e-3))),\n ('classification', LinearSVC(penalty=\"l1\", dual=False))]),\n \"LinearSVC with L1-based\"))\n\n # # Lines SVC l2\n self.results.append(self.benchmark(Pipeline([\n ('feature_selection', SelectFromModel(LinearSVC(penalty=\"l2\", dual=False,\n tol=1e-3))),\n ('classification', LinearSVC(penalty=\"l2\"))]),\n \"LinearSVC_L2\")) \n \n def fitgridCV(self, maxDF = 0.5, minDF = 2, wEliminar = [], cv = 3,\n percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100), \n feature_method = f_classif, clf = LinearSVC(penalty=\"l2\"), name = \"LinearSVC_L2\"):\n # # Haciendo vectorizacion manual\n # fileVectorizer = os.path.join(\"Output\", 'vectorizer.pk')\n # if os.path.exists(fileVectorizer):\n # with open(fileVectorizer, 'rb') as fin:\n # self.vectorizer = pickle.load(fin)\n # X_train = self.vectorizer.transform(map(lambda x: x.translate(None, '0123456789'), \n # self.dataSet.datos.texto.values))\n # else:\n # self.vectorizer = TfidfVectorizer(sublinear_tf = True, min_df = minDF, max_df = maxDF, \n # stop_words = stopwords.words(\"spanish\") + wEliminar, \n # tokenizer = leer.preprocess_text)\n # X_train = self.vectorizer.fit_transform(map(lambda x: x.translate(None, '0123456789'), \n # self.dataSet.datos.texto.values))\n self.makeVectorizer(maxDF, minDF, wEliminar) \n X_train = vstack([self.X_test, self.X_train])\n y_train = self.y_test.append(self.y_train)\n nWords = map(lambda x: X_train.shape[1] * x / 100, percentiles)\n kbest = SelectKBest(feature_method)\n pipeline = Pipeline([('kbest', kbest), ('classification', clf)])\n grid_search = GridSearchCV(pipeline, {'kbest__k': nWords}, cv = cv)\n grid_search.fit(X_train, y_train)\n\n # # Plot the cross-validation score as a function of percentile of features\n 
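# Added note: GridSearchCV stores one entry per tested k in cv_results_; the errorbar\n        # below plots the mean cross-validated accuracy with its standard deviation per k.\n        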
plt.figure(figsize=(10,8))\n plt.errorbar(percentiles, grid_search.cv_results_['mean_test_score'], grid_search.cv_results_['std_test_score'])\n plt.title(u'SVC-L2 seleccion palabras ('+ feature_method.__name__+ ')')\n plt.xlabel(u'No. de Palabras')\n plt.ylabel(u'ACC')\n plt.axis('tight')\n plt.show()\n\n # # Archivo de salida\n self.results.append(self.benchmark(grid_search.best_estimator_, feature_method.__name__+'_gridCV_' + name)) \n # outDir = os.path.join(\"Output\", \"clasificadores\")\n # name = 'gridCV_' + name\n # file_clf = os.path.join(outDir, re.sub('(\\\\s)', '_', name)+ '_' + self.nameOut + '.pkl')\n # joblib.dump(grid_search.best_estimator_, file_clf) \n # resultBest = {k: v[grid_search.best_index_] for k, v in grid_search.cv_results_.iteritems()}\n # self.results.append({'clf_descr' : name, 'score' : grid_search.best_score_, \n # 'train_time' : resultBest['mean_fit_time'], 'test_time' : resultBest['mean_score_time'], \n # 'file_clf' : file_clf})\n \n def comparaClasificadores(self): # # Graficas comparacion de metodos\n indices = np.arange(len(self.results))\n colTomar = ['train_time', 'test_time', 'file_clf', 'score', 'clf_descr']\n results2 = [[x[i] for x in self.results] for i in colTomar]\n \n training_time, test_time, file_clf, score, clf_names = results2\n training_time = np.array(training_time) / np.max(training_time)\n test_time = np.array(test_time) / np.max(test_time)\n \n self.f = plt.figure(figsize=(12, 8))\n plt.title(\"Score\")\n plt.barh(indices, score, .2, label=\"score\", color='navy')\n plt.barh(indices + .3, training_time, .2, label=\"training time\",\n color='c')\n plt.barh(indices + .6, test_time, .2, label=\"test time\", color='darkorange')\n plt.yticks(())\n plt.legend(loc='best')\n plt.subplots_adjust(left=.25)\n plt.subplots_adjust(top=.95)\n plt.subplots_adjust(bottom=.05)\n for i, c in zip(indices, clf_names):\n plt.text(-.3, i, c)\n plt.savefig(os.path.join('Output', 'fgCom_' + self.nameOut + '.png'))\n plt.show()\n self.resum = pd.DataFrame(np.transpose(results2), columns = colTomar)\n \n \n def setMethod(self, clasificador = None): # # Selecciona el mejor o el definido por el usuario\n if clasificador:\n self.clasificador = [ii for ii in self.results if ii['clf_descr'] == clasificador][0]\n else:\n maxScore = np.max(map(lambda x: x['score'], self.results))\n self.clasificador = [ii for ii in self.results if ii['score'] == maxScore][0]\n self.clasificador['clf_descr'] = re.sub('(\\\\s)', '_', self.clasificador['clf_descr'])\n # # Cargando clasificador\n self.clasificador['clf'] = joblib.load(self.clasificador['file_clf'])\n\n if hasattr(self.clasificador['clf'], 'named_steps'):\n if 'feature_selection' in self.clasificador['clf'].named_steps.keys():\n self.clasificador['feat'] = self.clasificador['clf'].named_steps['feature_selection']\n if 'kbest' in self.clasificador['clf'].named_steps.keys():\n self.clasificador['feat'] = self.clasificador['clf'].named_steps['kbest']\n self.clasificador['clf'] = self.clasificador['clf'].named_steps['classification']\n self.X_test = self.clasificador['feat'].transform(self.X_test)\n self.X_train = self.clasificador['feat'].transform(self.X_train)\n\n print(\"... 
Se selecciono el clasificador:\" + self.clasificador['clf_descr'])\n\n #####################################################################################\n # # Salida en Excel\n #####################################################################################\n namExperi = self.nameOut + '.xlsx'\n writer = pd.ExcelWriter(os.path.join('Output', namExperi), engine='xlsxwriter')\n # # Resumen comparacion\n self.comparaClasificadores()\n auxResul = self.resum[['clf_descr', 'score', 'train_time', 'test_time']].sort(['score'], ascending = False) \n auxResul.to_excel(writer, sheet_name='Resul_clasificadores', startrow = 2, startcol = 14, index = False)\n worksheet = writer.sheets['Resul_clasificadores']\n worksheet.insert_image('A1', os.path.join('Output', 'fgCom_' + self.nameOut + '.png'), {'x_scale': 0.7, 'y_scale': 0.7})\n worksheet.set_column('O:R', 18)\n # # Otros clasificadores\n self.classes_results.to_excel(writer, sheet_name='Resultados_otros_clasificadores', index = False)\n worksheet = writer.sheets['Resultados_otros_clasificadores']\n worksheet.set_column('B:F', 18)\n # # Agregar resultados mejor clasificador\n pred = self.clasificador['clf'].predict(self.X_test)\n clf_rep = metrics.classification_report(self.y_test, pred,\n target_names=self.target_names)\n file_clf = os.path.join('Output', 'fgClases_' + self.nameOut + '.png')\n plot_classification_report(clf_rep, file_clf)\n worksheet = writer.book.add_worksheet('Mejor_clasificador_Test')\n worksheet.insert_image('A1', file_clf)\n # # Agregar resultado mejor clasificador Test + Trainng\n pred = self.clasificador['clf'].predict(vstack([self.X_test, self.X_train]))\n clf_rep = metrics.classification_report(self.y_test.append(self.y_train), pred,\n target_names=self.target_names)\n file_clf = os.path.join('Output', 'fgClasesALL_' + self.nameOut + '.png')\n plot_classification_report(clf_rep, file_clf)\n worksheet = writer.book.add_worksheet('Mejor_clasif_Training+Test')\n worksheet.insert_image('A1', file_clf)\n # # Cross validation mejor clasificador\n writer.save()\n \n \n def keyWords(self, nKeyWords = 20, fileOut = None):\n if self.clasificador is None:\n sys.exit(\"Error se debe seleccionar un clasificador funcion '.setMethod'\")\n\n if not os.path.exists(\"Output\"):\n os.makedirs(\"Output\")\n\n if fileOut is None:\n fileOut = os.path.join(\"Output\", self.clasificador['clf_descr'] + '_keyWords.txt')\n \n # # Extraccion de caracteristicas\n if 'feat' in self.clasificador.keys():\n feat_pipline = self.clasificador['feat'].transform(np.arange(len(self.feature_names)).reshape(1, -1))[0]\n feat_pipline = [self.feature_names[pp] for pp in feat_pipline]\n else:\n feat_pipline = self.feature_names\n\n # # Extraccion de key words\n self.key_Words = pd.DataFrame()\n print(\"Creando top \" + str(nKeyWords) + \" keywords por cada clase.\")\n for i, label in enumerate(self.clasificador['clf'].classes_):\n top10_sco = np.argsort(self.clasificador['clf'].coef_[i])[-nKeyWords:]\n top10 = [feat_pipline[pp] for pp in top10_sco]\n auxCau = np.repeat(label, len(top10))\n top10_sco = self.clasificador['clf'].coef_[i][top10_sco]\n self.key_Words = pd.concat([self.key_Words, \n pd.DataFrame({'CAUSA': auxCau, \n 'KEY_WORD' : top10, 'SCORE': top10_sco})])\n writer = pd.ExcelWriter(fileOut)\n self.key_Words.to_excel(writer, 'keyWords')\n writer.save()\n #self.key_Words.to_csv(fileOut, sep=';',dtype=str,index= False)\n print(\"... 
Se escribio archivo: \", fileOut)\n        \n    def clasificar(self, x = None, nClasses = 1,\n                   includeProb = True, y = None, fileOut = None):\n        flagNewX = x is None\n        if flagNewX:\n            x = self.X_test_text\n            if y is None: y = self.y_test\n        if self.clasificador is None:\n            sys.exit(\"Error se debe seleccionar un clasificador funcion '.setMethod'\")\n\n        if fileOut is None:\n            if not os.path.exists(\"Output\"):\n                os.makedirs(\"Output\")\n            fileOut = os.path.join(\"Output\", self.clasificador['clf_descr'] + '_clasificacion.xlsx')\n        \n        # # Build the tf-idf representation\n        if re.match('dataSet', x.__class__.__name__):\n            salidaCausa = x.datos\n            x = self.vectorizer.transform(x.getText())\n            if not flagNewX and 'feat' in self.clasificador.keys():\n                x = self.clasificador['feat'].transform(x)\n        else:\n            if not re.match('.+matrix', type(x).__name__):\n                sys.exit(\"Error el parametro 'x' debe ser de tipo 'scipy.matrix'\")\n            else:\n                salidaCausa = pd.DataFrame(self.X_test_text)[[0, 2]]\n                salidaCausa.columns = ['ID_DEL_PROCESO', 'CAUSA_REAL']\n\n        # # Run the prediction\n        t0 = time()\n        if self.clasificador['clf_descr'] == \"Random_forest\":\n            pred = self.clasificador['clf'].predict(x.toarray())\n        else:\n            pred = self.clasificador['clf'].predict(x)\n        test_time = time() - t0\n        print(\"test time: %0.3fs\" % test_time)\n        \n        # # Evaluate prediction performance\n        if y is not None:\n            score = metrics.accuracy_score(y, pred)  # score against the labels that belong to x\n            print(\"Lista la clasificacion (tiempo-ejecucion = %0.3fs)............ score: %0.3f\" % (test_time, score))\n\n        # # Arrange the outputs\n        salidaCausa['CAUSA_MODELO'] = pred\n        salidaCausa[['ID_DEL_PROCESO', 'CAUSA_MODELO', 'IND_MUESTRA']].to_excel(fileOut, index= False)\n        return(salidaCausa)","sub_path":"djangorest/infoANDJE/utils/clasificacion.py","file_name":"clasificacion.py","file_ext":"py","file_size_in_byte":30587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"360553343","text":"class Ajitesh:\r\n def aj(self):\r\n self.name= name \r\n self.height = height\r\n #print(self.name,self.height) \r\n\r\n def show(self):\r\n print(self.name+\"'s height is \"+str(self.height))\r\nAj1= Ajitesh()\r\nAj1.name = \"Ajitesh Mishra\"\r\nAj1.height = 5.9 #instance attribute\r\nAj1.show() #calling with object\r\nAjitesh.show(Aj1) # calling the function with class name\r\n\r\n\r\n","sub_path":"Python Oops/Classes.py","file_name":"Classes.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"20922252","text":"import matplotlib.pyplot as plt\n#from sklearn.manifold import TSNE\nimport numpy as np\nfrom simulation import Simulation\nfrom settings import *\n\nSIM_LENGTH = 200\n\nif __name__ == \"__main__\":\n simulation = Simulation()\n simulation.run(SIM_LENGTH)\n \n # plot degrees of each Individual\n x = range(len(simulation.network.individuals))\n y = [ len(indv.followers) for indv in simulation.network.individuals ]\n plt.plot(x, y, \"b.\")\n plt.show(\"Degree Distribution\")\n plt.close()\n\n print(\"Individual\\t\\tDegree\")\n for indv in simulation.network.individuals:\n f = len(indv.followers)\n if f > 0:\n print(f\"{indv.name}\\t\\t{f}\")\n \n # plot message counts over time\n x = range(len(simulation.postmaster.messageCountHistory))\n plt.plot(x, simulation.postmaster.messageCountHistory, \"r.\")\n plt.show(\"Message Sends Per Iteration\")\n plt.close()\n\n allOpinions = [ indv.opinions for indv in simulation.network.individuals ]\n opinionArray = np.array([ [ opinion[topic] for topic in TOPICS ] for opinion in allOpinions ])\n coords = TSNE(n_components=2).fit_transform(opinionArray)\n xEmbedding = list()\n yEmbedding = list()\n\n for c in coords:\n xEmbedding.append(c[0])\n yEmbedding.append(c[1])\n plt.scatter(xEmbedding, yEmbedding)\n plt.show()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"647503112","text":"import os\nimport string\n\nfrom app.project_type.docker_container import DockerContainer\nfrom app.project_type.project_type import ProjectType\nfrom app.util.conf.configuration import Configuration\n\n\nclass Docker(ProjectType):\n \"\"\"\n Example API call to invoke a docker-type build.\n {\n \"type\": \"docker\",\n \"image\": \"pod4101-automation1102.pod.box.net:5000/webapp_v5_dev:latest\",\n \"project_directory\": \"/box/www/current\",\n \"host\": \"pod4101-tester.dev.box.net\",\n \"user\": \"jenkins\"\n }\n \"\"\"\n\n def __init__(self, image, project_directory, mounted_volumes=None, user=None, host=None, config=None,\n job_name=None, build_project_directory=None, remote_files=None):\n \"\"\"\n Note: the first line of each parameter docstring will be exposed as command line argument documentation for the\n clusterrunner build client.\n\n :param image: url to the image with tag (ie: docker01.dev.box.net/webapp_v5dev:latest)\n :type image: string\n :param project_directory: path within the docker image that contains cluster_runner.yaml\n :type project_directory: string\n :param mounted_volumes: key-values of mounted host:container directories\n :type mounted_volumes: dict of [str, str]\n :param user: the user to run the container as\n :type user: string|None\n :param host: the hostname to assign for the container\n :type host: string|None\n :param config: a yaml string representing the project_type's config\n :type config: str|None\n :param job_name: a list of job names we intend to run\n :type job_name: list [str] | None\n :param remote_files: dictionary mapping of output file to URL\n :type remote_files: dict[str, str] | None\n \"\"\"\n super().__init__(config, job_name, remote_files)\n self.project_directory = project_directory\n self._image = image\n\n artifact_dir = Configuration['artifact_directory']\n mounted_volumes = mounted_volumes or {}\n mounted_volumes.setdefault(artifact_dir, artifact_dir)\n\n self._container = DockerContainer(image, user, host, mounted_volumes)\n\n def _fetch_project(self):\n pull_command = 'docker pull {}'.format(self._image)\n self._execute_in_project_and_raise_on_failure(pull_command, 'Could not pull Docker container.')\n\n def _get_config_contents(self):\n \"\"\"\n Get the contents of cluster_runner.yaml from a Docker container\n :return: The contents of cluster_runner.yaml\n :rtype: str\n \"\"\"\n yaml_path = os.path.join(self.project_directory, Configuration['project_yaml_filename'])\n raw_config_contents, _ = self.execute_command_in_project(\"cat \" + yaml_path)\n\n if raw_config_contents is None:\n raise RuntimeError('Could not read {} from the Docker container'.format(yaml_path))\n\n return raw_config_contents\n\n def _setup_executors(self, executors, project_type_params):\n \"\"\"\n Run the job config setup on each executor's project_type. This override is necessary because a container is\n started for each executor, and the job config's setup command should run on each of them.\n :type executors: list [SubjobExecutor]\n :type project_type_params: dict [str, str]\n \"\"\"\n super()._setup_executors(executors, project_type_params)\n for executor in executors:\n executor.run_job_config_setup()\n\n def execute_command_in_project(self, command, extra_environment_vars=None, **popen_kwargs):\n \"\"\"\n Execute a command in the docker container. 
Starts a docker session\n\n :param command: the shell command to execute\n :type command: string\n :param extra_environment_vars: additional environment variables to set for command execution\n :type extra_environment_vars: dict[str, str]\n :param popen_kwargs: Note: this is unused in the docker project_type\n :type popen_kwargs: dict\n :return: a tuple of (the string output from the command, the exit code of the command)\n :rtype: (string, int)\n \"\"\"\n environment_setter = self.shell_environment_command(extra_environment_vars)\n command = self.command_in_project('{} {}'.format(environment_setter, command))\n self._logger.debug('Executing command in project: {}', command)\n\n return self._container.run(command)\n\n def setup_executor(self):\n \"\"\"\n Start a new docker session via which commands will be executed.\n \"\"\"\n self._container.start_session()\n\n def teardown_executor(self):\n \"\"\"\n Close the running docker session.\n \"\"\"\n self._container.end_session()\n\n def timing_file_path(self, job_name):\n \"\"\"\n :type job_name: str\n :return: the absolute path to where the timing file for job_name SHOULD be. This method does not guarantee\n that the timing file exists.\n :rtype: string\n \"\"\"\n # There can be a colon in the URL part of the docker image, so we only want to check the image_and_tag\n # portion of the full docker image path for a colon (in order to strip out the tag).\n image_and_tag = self._image.rsplit('/', 1)[-1]\n\n if ':' in image_and_tag:\n full_image_without_tag = self._image.rsplit(':', 1)[0]\n else:\n full_image_without_tag = self._image\n\n file_system_friendly_docker_image = self._remove_file_system_unfriendly_characters(full_image_without_tag)\n return os.path.join(\n Configuration['timings_directory'],\n file_system_friendly_docker_image,\n \"{}.timing.json\".format(job_name)\n )\n\n def kill_subprocesses(self):\n \"\"\"\n Signal the environment that any currently running subprocesses should be terminated. This is a no-op for Docker\n environments since we can just kill the entire container.\n \"\"\"\n pass\n\n def project_id(self):\n # Docker cannot fetch multiple containers in parallel, so the project_id for all docker-project_type\n # builds must be done serially.\n return 'docker'\n\n def _remove_file_system_unfriendly_characters(self, unescaped_path):\n \"\"\"\n Escape the string unescaped_path to be POSIX directory format compliant.\n\n :param unescaped_path: the original, unescaped string\n :type unescaped_path: string\n :rtype: string\n \"\"\"\n valid_chars = \"-_.()%s%s\" % (string.ascii_letters, string.digits)\n return ''.join(c for c in unescaped_path if c in valid_chars)\n","sub_path":"app/project_type/docker.py","file_name":"docker.py","file_ext":"py","file_size_in_byte":6650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
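+The tag-stripping logic in `timing_file_path` above is subtle because a registry host may itself contain a colon (host:port), so only the final path segment is checked for a tag. A standalone worked sketch of that logic, using the image URL from the class docstring:
+
+# Worked example of timing_file_path's tag stripping (standalone sketch).
+image = 'pod4101-automation1102.pod.box.net:5000/webapp_v5_dev:latest'
+image_and_tag = image.rsplit('/', 1)[-1]   # 'webapp_v5_dev:latest'
+if ':' in image_and_tag:
+    image = image.rsplit(':', 1)[0]        # strips only the ':latest' tag
+print(image)  # pod4101-automation1102.pod.box.net:5000/webapp_v5_dev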
+{"seq_id":"594323816","text":"import urllib.request\nfrom bs4 import BeautifulSoup\nurl = \"https://api.photozou.jp/rest/search_public.xml?keyword=%E7%8C%AB\"\nresponse = urllib.request.urlopen(url)\nrss = response.read().decode(\"utf-8\")\n\nsoup = BeautifulSoup(rss, \"xml\")\n\nfor s in soup.find_all(\"photo\"):\n print(s.find_all(\"image_url\")[0].string)","sub_path":"img_url_scraping.py","file_name":"img_url_scraping.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"356519225","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 1 15:58:30 2019\n\n@author: \n Jan Brekelmans\n j.j.w.c.brekelmans@gmail.com\n\"\"\"\n\n\"\"\"\nFunctions used for Project Euler solutions\n\"\"\"\n\nimport numpy as np\nimport math\nfrom decimal import *\n\n\n# Test if the integer n is prime\ndef is_prime(n):\n if n == 1:\n return False\n elif n == 2 or n == 3:\n return True\n elif n%2 == 0:\n return False\n elif n%3 == 0:\n return False\n else:\n for i in range(5,int(n**.5) + 1,2):\n if n%i == 0:\n return False\n return True\n\n# Get a list of the prime factors of n, according to their multiplicity\n# Example: primeFactors(24)= [2,2,2,3]\ndef primeFactors(n):\n factors = []\n \n while n%2 == 0 and n > 1:\n factors.append(2)\n n = n//2\n \n while n%3 ==0 and n > 1:\n factors.append(3)\n n = n//3\n \n \n for i in range(3,n+1,2):\n if n < 1.5:\n break\n while n%i == 0:\n factors.append(i)\n n = n//i\n \n \n return factors\n\n# Check if the number n is a palindrome\ndef is_palindrome(n):\n lst = [int(i) for i in str(n)]\n if lst == lst[::-1]:\n return True\n return False\n\n# Compute the greatest common divisor of the integers x and y\ndef GCD(x,y):\n \n while y is not 0:\n x,y = y, x%y\n return x\n\n# Compute the lowest common multiple of the integers x and y\ndef LCM(x,y):\n return x*y//GCD(x,y)\n\n# Generate a list of primes of the first n numbers\ndef prime_sieve(n):\n sieve = [True]*n\n \n for i in range(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i] = [False]*((n-i*i-1)//(2*i)+1)\n return [2]+[i for i in range(3,n,2) if sieve[i]]\n\n# Generate a boolean array where TRUE is for prime and FALSE for composite\ndef prime_check(n):\n sieve = prime_sieve(n)\n \n check = [False]*(n+1)\n \n for i in sieve:\n check[i] = True\n \n return check\n\n\n# Returns an upper bound for the n-th prime number\ndef prime_upper_bound(n):\n upper_bound = n*(np.log(n) + np.log(np.log(n)))\n \n return int(upper_bound)\n\n# Compute the number of divisors of integer n.\ndef number_of_divisors(n):\n primes = primeFactors(n)\n \n count = np.bincount(np.array(primes))\n count = count+1\n \n total = np.prod(count)\n \n return total\n\n# Compute the Euler totient function of n, the number of integers 1<=k<=n \n# such that gcd(n,k)=1\ndef Euler_totient(n):\n factors = primeFactors(n)\n \n factors = set(factors)\n \n total = n\n \n for i in factors:\n total = total - total//i\n return total\n\n# Compute the Collatz sequence chain of the integer n, which halts when it \n# reaches 1\ndef Collatz_sequence_chain(n):\n chain = [n]\n while n is not 1:\n if n%2 == 0:\n n = n//2\n else:\n n = 3*n+1\n chain.append(n)\n \n return chain\n\n# Computes the next number in the Collatz sequence\ndef Collatz(n):\n if n == 1:\n return 1\n elif n%2 == 0:\n return n//2\n else:\n return 3*n+1\n\n# Returns the number n in words, n<= 1000\ndef to_words(n):\n ones = [\"zero\",\"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\", \"eight\",\\\n \"nine\", \"ten\", \"eleven\", \"twelve\", \"thirteen\", \"fourteen\",\\\n \"fifteen\", \"sixteen\", \"seventeen\", \"eighteen\",\"nineteen\"]\n tens = [\"\",\"\",\"twenty\",\"thirty\",\"forty\",\"fifty\",\"sixty\",\"seventy\",\"eighty\",\"ninety\"]\n \n if n < 20:\n return ones[n] + \" \"\n elif n < 100:\n return tens[n//10] + \" \" + ((to_words(n%10) + \" \") if (n%10 is not 0) else \"\")\n elif n < 1000:\n return ones[n//100] + \" hundred \" + \\\n ((\"and \" + to_words(n%100)) if (n%100 is not 0) else \"\")\n elif n < 10000:\n return ones[n//1000] 
+ \" thousand \" + \\\n ((to_words(n%1000)) if (n%1000 is not 0) else \"\")\n\n# Calculate the proper divisors of integer n\ndef proper_divisors(n):\n return [x for x in range(1,(n+1)//2+1) if n%x == 0 and n!=x]\n\n# Calculate the divisors of integer n\ndef divisors(n):\n lst = proper_divisors(n)\n lst.append(n)\n return lst\n\n# Check if the integer n is perfect.\ndef is_perfect(n):\n if n == sum(proper_divisors(n)):\n return True\n return False\n\n# Check if the integer n is abundant.\ndef is_abundant(n):\n if n < sum(proper_divisors(n)):\n return True\n return False\n\n# Check if the integer n is deficient.\ndef is_deficient(n):\n if n > sum(proper_divisors(n)):\n return True\n return False\n\n# Compute the n-th Fibonacci number., F1 = 1, F2 = 1\n# We use Fn = floor(phi^n / sqrt(5) + 1)\n# Only availabe for n such that F_n fits in a double\ndef Fibonacci_quick(n):\n phi = (1+5**.5)/2\n \n F = np.floor(phi**(n+1)/(5**.5)+1/2)\n \n return int(F)\n\n# Compute the first n Fibonacci numbers, and returns the array of these values\ndef Fibonacci_array(n):\n numbers = [0]*n\n \n numbers[0] = 1\n numbers[1] = 2\n \n for i in range (n-2):\n numbers[i+2] = numbers[i] + numbers[i+1]\n \n return numbers\n\n# Compute the index of the lowest Fibonacci number greater or equal to x\ndef Fibonnaci_ceil(x):\n phi = (1+5**.5)/2\n \n n = np.ceil(np.log(x*5**.5 + .5)/np.log(phi)) - 1\n \n return int(n)\n\ndef factorial(n):\n if n == 0 or n == 1:\n return 1\n else:\n i = 1\n for j in range(1,n+1):\n i = i * j\n return i\n\ndef is_binary_palindrome(n):\n binary = bin(n)\n binary = str(binary)[2:]\n \n if binary == binary[::-1]:\n return True\n return False\n\ndef binomial(n,k):\n return math.factorial(n) // math.factorial(k) // math.factorial(n-k)\n\n# Computes the n-th triangle number\ndef triangle(n):\n return binomial(n+1,2)\n\n# Return the alphabetical value of character\ndef char_place(char):\n return ord(char.lower()) - 96\n\n# Return a list of the first n-convergents of e\ndef convergent_e(total):\n \n precomp = [2,1,2]\n \n lst = []\n lst.append(precomp)\n \n rest = (total-3)//3 + 1\n \n for i in range(rest):\n lst.append([1,1,(2+i)*2])\n \n lst = [x for l in lst for x in l]\n \n return lst[:total]\n\n# Returns a fraction from a convergent\ndef convergent_to_fraction(convergent):\n num = 1\n den = 0\n \n for u in reversed(convergent):\n num, den = den+num*u,num\n \n return den,num\n\n# Calculate the power a^m mod n using square and multiply\ndef square_and_multiply(a,m,n):\n binary = bin(m)[2:]\n \n c = a\n \n lst = [int(i) for i in str(binary)]\n \n for i in range(1,len(lst)):\n c = (c*c) % n\n \n if lst[i] == 1:\n c = (a*c) % n\n \n return c\n \n\n","sub_path":"Python/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":6684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"133926329","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Variables de decisión\n\n# In[12]:\n\n\n# X_m_f_p \n# si en el mes m en la región z el contrato f realiza el plan p\n\nmeses = [i for i in range(1,13)]\nplanes_contrato_1 = {'contrato': 1, 'planes': [(1,2),(2,1)]}\n#planes_contrato_2 = {'contrato': 2, 'planes': }\n#planes_contrato_3 = {'contrato': 3, 'planes': }\n#planes_contrato_4 = {'contrato': 4, 'planes': }\n\n#planes_contratos = [planes_contrato_1, planes_contrato_2, planes_contrato_3, planes_contrato_4]\nplanes_contratos = [planes_contrato_1]\n\ndef generador_de_x(meses,planes_contratos):\n combinaciones = []\n for contrato in planes_contratos:\n for mes in meses:\n for plan in contrato['planes']:\n combinaciones.append({'mes':mes,'contrato':contrato['contrato'],'plan':plan,'valor':1})\n combinaciones.append({'mes':mes,'contrato':contrato['contrato'],'plan':plan,'valor':0})\n return combinaciones\n \nX_m_f_p = generador_de_x(meses,planes_contratos)\n\n\n# In[17]:\n\n\n# Y_m_d \n# cargas a enviar por transporte SPOT en el mes m al puerto d \n\nmeses = [i for i in range(1,13)]\npuertos = [i for i in range(1,9)]\npotenciales_cargas_spot = [i for i in range(1,6)] ### SE DEBE DEFINIR BIEN \n\ndef generador_de_y(meses,puertos,potenciales_cargas_spot):\n combinaciones = []\n for mes in meses:\n for puerto in puertos:\n for carga in potenciales_cargas_spot:\n combinaciones.append({'mes':mes,'puerto':puerto,'carga':carga})\n return combinaciones\n\nY_m_d = generador_de_y(meses,puertos,potenciales_cargas_spot)\n\n\n# In[20]:\n\n\n# B_m_d\n# cargas a enviar en el mes m al puerto d\n\nmeses = [i for i in range(1,13)]\npuertos = [i for i in range(1,9)]\ncargas = [i for i in range(1,100)] ##### SE DEBE DEFINIR BIEN \n\ndef generador_de_y(meses,puertos,cargas):\n combinaciones = []\n for mes in meses:\n for puerto in puertos:\n for carga in cargas:\n combinaciones.append({'mes':mes,'puerto':puerto,'carga':carga})\n return combinaciones\n\nB_m_d = generador_de_y(meses,puertos,cargas)\n\n\n# In[26]:\n\n\n# Q_m_c\n# cargas transportadas en el mes m al cliente c\n\nclientes = [i for i in range(1,4)]\ncargas = [i for i in range(0,10)]\nmeses = [i for i in range(1,13)]\n\ndef generador_de_q(clientes,cargas,meses):\n combinaciones = []\n for mes in meses:\n for cliente in clientes:\n for carga in cargas:\n combinaciones.append({'mes':mes,'cliente':cliente,'carga':carga})\n return combinaciones\n\nQ_m_c = generador_de_q(clientes,cargas,meses)\n\n\n# In[22]:\n\n\n# S_m\n# potenciales cargas por ventas cortas en el mes m\n\nminimo = 0 ############ SE DEBEN DEFINIR BIEN \nmaximo = 10\nS_m = [i for i in range(minimo,maximo)]\n\n","sub_path":"Variables de decisión.py","file_name":"Variables de decisión.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"299874930","text":"import sys\nimport zipfile\nimport com.neo.sk.mxnetStart.utils as gb\nfrom mxnet import init, gluon\nfrom mxnet.gluon import loss as gloss, data as gdata, utils as gutils, model_zoo\nimport os\nsys.path.append('..')\n\n\n\"\"\"\n迁移学习的小例子,待网好的时候将图片下载\ncreated by byf on 2018/7/11\n\"\"\"\n\n\ndata_dir = '../data'\nbase_url = 'https://apache-mxnet.s3-accelerate.amazonaws.com/'\nfname = gutils.download(\n base_url + 'gluon/dataset/hotdog.zip'\n)\nwith zipfile.ZipFile(fname, 'r') as z:\n z.extractall(data_dir)\n\n\ntrain_imgs = gdata.vision.ImageFolderDataset(\n os.path.join(data_dir, 'hotdog/train'))\ntest_imgs = gdata.vision.ImageFolderDataset(\n os.path.join(data_dir, 'hotdog/test'))\nhotdogs = [train_imgs[i][0] for i in range(8)] # 训练集的前8张照片\nnot_hotdogs = [train_imgs[-i-1][0] for i in range(8)] # 训练集的后8张图片\ngb.show_images(hotdogs + not_hotdogs, 2, 8, scale=1.4); # 加分号只显示图。\n\n\n# 指定 RGB 三个通道的均值和方差来将图片通道归一化。\nnormalize = gdata.vision.transforms.Normalize(\n [0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n\ntrain_augs = gdata.vision.transforms.Compose([\n gdata.vision.transforms.RandomResizedCrop(224),\n gdata.vision.transforms.RandomFlipLeftRight(),\n gdata.vision.transforms.ToTensor(),\n normalize,\n])\n\ntest_augs = gdata.vision.transforms.Compose([\n gdata.vision.transforms.Resize(256),\n gdata.vision.transforms.CenterCrop(224),\n gdata.vision.transforms.ToTensor(),\n normalize\n])\n\npretrained_net = model_zoo.vision.resnet18_v2(pretrained=True) # 下载预训练的参数\npretrained_net.output\n# feature output\nfinetune_net = model_zoo.vision.resnet18_v2(classes=2) # 最后的结果只有两类\nfinetune_net.features = pretrained_net.features\nfinetune_net.output.initialize(init.Xavier())\n\n\ndef train(net, learning_rate, batch_size=128, epochs=5):\n train_iter = gdata.DataLoader(\n train_imgs.transform_first(train_augs), batch_size, shuffle=True)\n test_iter = gdata.DataLoader(\n test_imgs.transform_first(test_augs), batch_size)\n\n ctx = gb.try_all_gpus()\n net.collect_params().reset_ctx(ctx)\n net.hybridize()\n loss = gloss.SoftmaxCrossEntropyLoss()\n trainer = gluon.Trainer(net.collect_params(), 'sgd', {\n 'learning_rate': learning_rate, 'wd': 0.001})\n gb.train(train_iter, test_iter, net, loss, trainer, ctx, epochs)\n\n\ntrain(finetune_net, 0.01)\n\n\n# 对比新训练的模型\nscratch_net = model_zoo.vision.resnet18_v2(classes=2)\nscratch_net.initialize(init=init.Xavier())\ntrain(scratch_net, 0.1)","sub_path":"com/neo/sk/mxnetStart/vision/fineTuning.py","file_name":"fineTuning.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"103394055","text":"import asyncio\nfrom itertools import count\nfrom operator import le\nimport discord\nfrom discord.ext import commands\nfrom discord.utils import get\nimport requests\nimport json\nimport random\nfrom datetime import datetime\nimport pytz \nimport logging\nfrom modules.dbcontrol import *\nlogger = logging.getLogger(\"AntiCapsLog\")\nlogger.setLevel(logging.INFO)\nlogger_handler = logging.FileHandler(\"anticaps.log\")\nlogger_handler.setLevel(logging.INFO)\nlogger_formatter = logging.Formatter('%(message)s')\nlogger_handler.setFormatter(logger_formatter)\nlogger.addHandler(logger_handler)\n\n\ndef log(logs):\n tz_NY = pytz.timezone('Europe/Moscow') \n datetime_m = datetime.now(tz_NY) \n t = datetime_m.strftime('%d/%m/%Y %H:%M:%S')\n logger.info(f\"{t} {str(logs)}\")\n\nclass AntiCaps(commands.Cog):\n def __init__(self, bot: commands.Bot):\n self.bot = bot\n\n\n global count_up\n global count_down\n\n @commands.Cog.listener()\n async def on_message(self, message):\n if message.author.bot:\n return\n try:\n if message.channel.id != get_system(db, message.guild.id, \"spam_channel\") and message.guld.id != 110373943822540800:\n\n count_up = 0\n count_down = 0\n for letter in message.content:\n if letter.isalpha():\n if letter == letter.upper():\n count_up += 1\n else:\n count_down += 1\n\n if count_up > 7 and count_up > count_down:\n await message.delete()\n await message.channel.send(f\"{message.author.mention}, не капси мне тут!!!!!\")\n except:\n pass \n \ndef setup(bot):\n bot.add_cog(AntiCaps(bot))","sub_path":"modules/anticaps.py","file_name":"anticaps.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"52535644","text":"#!/usr/bin/env python\n\nimport math\nimport itertools\n\ndef is_prime(n):\n\tif n == 1:\n\t\treturn False\n\tif n == 2:\n\t\treturn True\n\tsqrt_n = math.sqrt(n)\n\tfor i in range(2, int(math.ceil(sqrt_n)) + 1):\n\t\tif n % i == 0:\n\t\t\treturn False\n\treturn True\n\ndef M(p, q, N):\n\tm = 0\n\ta = 1\n\twhile p**a * q <= N:\n\t\tb = 1\n\t\twhile p**a * q**b <= N:\n\t\t\tt = p**a * q ** b\n\t\t\tif t > m:\n\t\t\t\tm = t\n\t\t\tb+=1\n\t\ta+=1\n\treturn m\n\ndef solve():\n#limit = 100\n\tlimit = 10000000\n\tl = []\n\ts = 0\n\tfor i in range(2, limit+1):\n\t\tif is_prime(i):\n\t\t\tl.append(i)\n\tfor x in itertools.combinations(l, 2):\n\t\tif x[0]**2 > limit:\n\t\t\tbreak\n\t\tif x[0]*x[1] > limit:\n\t\t\tcontinue\n\t\tm = M(x[0], x[1], limit)\n\t\ts += m\n\tprint('answer: ', s)\n\nif __name__ == '__main__':\n\tsolve()\n\n","sub_path":"pe347/python/pe.py","file_name":"pe.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"451447249","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom NeuralNetwork import NeuralNetwork\n\nuse_cols = ['stock_symbol', 'Total_Transactions', 'Total_Traded_Shares', 'Total_Traded_Amount',\n 'Opening_Price', 'Max_Price', 'Min_Price', 'Closing_Price', 'Next_Day_Closing_Price']\n\ndf = pd.read_csv(\"stocks_data.csv\", usecols=use_cols)\ndata_np = df.to_numpy()\n\n\ndef normalize(data_col):\n output = np.zeros(data_col.shape)\n\n max_of_row = data_col[0][0]\n min_of_row = data_col[0][0]\n\n for row in data_col:\n if row[0] > max_of_row:\n max_of_row = row[0]\n if row[0] < min_of_row:\n min_of_row = row[0]\n\n for index, data in enumerate(data_col):\n output[index] = round((data[0] - min_of_row) / (max_of_row - min_of_row), 2)\n\n return output\n\n\ndef get_data_of(stock_symbol):\n symbol = np.asarray(data_np[:, [0]])\n indices = [i for i, x in enumerate(symbol) if x == stock_symbol]\n return data_np[indices]\n\n\ndef purge(data_of_company):\n data = np.asarray(data_of_company)\n indices = [i for i, x in enumerate(data[:, [7]]) if x != 0]\n return data_of_company[indices]\n\n\ndef main(stock_symbol, purge_bool=False, train_split=0.85, show_cp_plot=False):\n\n data_of_symbol = get_data_of(stock_symbol)\n if purge_bool:\n data_of_symbol = purge(data_of_symbol)\n input_x_tt = data_of_symbol[:, [1]]\n input_x_tts = data_of_symbol[:, [2]]\n input_x_tta = data_of_symbol[:, [3]]\n input_x_op = data_of_symbol[:, [4]]\n input_x_max_p = data_of_symbol[:, [5]]\n input_x_min_p = data_of_symbol[:, [6]]\n input_x_cp = data_of_symbol[:, [7]]\n\n closing_price = data_of_symbol[:, [7]]\n next_day_closing_price = data_of_symbol[:, [8]]\n diff = closing_price - next_day_closing_price\n y = (diff < 0).astype(int)\n\n normalized_x_tt = normalize(input_x_tt)\n\n normalized_x_tts = normalize(input_x_tts)\n normalized_x_tta = normalize(input_x_tta)\n normalized_x_op = normalize(input_x_op)\n normalized_x_max_p = normalize(input_x_max_p)\n normalized_x_min_p = normalize(input_x_min_p)\n normalized_x_cp = normalize(input_x_cp)\n\n normalized_x = np.concatenate((normalized_x_tt, normalized_x_tts, normalized_x_tta,\n normalized_x_op, normalized_x_max_p, normalized_x_min_p, normalized_x_cp), axis=1)\n\n split = int(train_split * len(y))\n train_x, train_y = normalized_x[:split, :], y[:split, :]\n test_x, test_y = normalized_x[split:, :], y[split:, :]\n\n neural_net = NeuralNetwork(train_x, train_y)\n for _ in range(1500):\n neural_net.feedforward()\n neural_net.backprop()\n\n neural_net.evaluate(test_x, test_y)\n print((NeuralNetwork.accuracy/test_y.size) * 100, '%')\n\n if show_cp_plot:\n plt.plot(input_x_cp)\n plt.show()\n\n\nif __name__ == '__main__':\n\n main(stock_symbol='ADBL', purge_bool=True, train_split=0.85 )\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"93174418","text":"from django.urls import path, re_path\nfrom . import views\nfrom .models import Vehicle, Customer\n\nurlpatterns = [\n\n # vehicle related URLs\n path('vehicles/', views.VehicleList.as_view()),\n path('available_vehicles/',\n views.VehicleList.as_view(queryset=Vehicle.objects.filter(availability=True))),\n path('not_available_vehicles/',\n views.VehicleList.as_view(queryset=Vehicle.objects.filter(availability=False))),\n path('vehicles/
/', views.VehicleDetail.as_view()),\n\n # customer related URLs\n path('customers/', views.CustomerList.as_view()),\n path('customers//', views.CustomerDetail.as_view()),\n re_path('customers/(?P.+)/', views.CustomerList.as_view()),\n\n # batch related URLs\n path('batch/', views.BatchList.as_view()),\n path('batch//', views.BatchDetail.as_view()),\n\n # solver related URLs\n path('solve///', views.SolverView.as_view()),\n\n # solution related URLs\n path('solution//', views.SolutionView.as_view()),\n\n # address related URLs\n path('address/', views.AddressList.as_view()),\n path('address//', views.AddressDetail.as_view()),\n\n]\n","sub_path":"vrp_project/vrp_project/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
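+For reference on the converter syntax assumed above: `path()` captures typed URL segments and passes them to the view as keyword arguments. A minimal self-contained sketch with a hypothetical function view:
+
+# Minimal illustration of a Django path converter (hypothetical view).
+from django.http import HttpResponse
+from django.urls import path
+
+def batch_detail(request, pk):
+    # pk arrives already converted to int by <int:pk>
+    return HttpResponse(f"batch {pk}")
+
+urlpatterns = [path('batch/<int:pk>/', batch_detail)]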
+{"seq_id":"609973158","text":"from django.contrib import admin\nfrom django_summernote.admin import SummernoteModelAdmin\nfrom .models import *\nfrom .models import Sms\nfrom .utils import VumiSmsApi\n\n\nclass PageAdmin(admin.ModelAdmin):\n list_display = (\"name\", \"description\")\n search_fields = (\"name\", \"description\")\n fieldsets = [\n (None, {\"fields\": [\"name\", \"description\"]})\n ]\n\n\nclass PostAdmin(SummernoteModelAdmin):\n list_display = (\"name\", \"description\")\n list_filter = (\"course\", )\n search_fields = (\"name\", \"description\")\n fieldsets = [\n (None,\n {\"fields\": [\"name\", \"description\", \"course\", \"publishdate\"]}),\n (\"Content\",\n {\"fields\": [\"big_image\", \"small_image\", \"moderated\", \"content\"]})\n ]\n\n\nclass ChatMessageInline(admin.TabularInline):\n model = ChatMessage\n extra = 0\n readonly_fields = (\"author\", \"content\", \"publishdate\")\n ordering = (\"publishdate\",)\n\n def has_add_permission(self, request):\n return False\n\n\nclass ChatGroupAdmin(SummernoteModelAdmin):\n list_display = (\"name\", \"course\", \"description\")\n list_filter = (\"course\", )\n search_fields = (\"name\", \"description\")\n fieldsets = [\n (None, {\"fields\": [\"name\", \"description\", \"course\"]})\n ]\n inlines = (ChatMessageInline, )\n\n\nclass DiscussionAdmin(admin.ModelAdmin):\n list_display = (\"course\", \"module\", \"question\", \"author\", \"publishdate\",\n \"content\", \"moderated\")\n list_filter = (\"course\", \"module\", \"question\", \"moderated\")\n search_fields = (\"author\", \"content\")\n fieldsets = [\n (None,\n {\"fields\": [\"name\", \"description\"]}),\n (\"Content\",\n {\"fields\": [\"content\", \"author\", \"publishdate\", \"moderated\"]}),\n (\"Discussion Group\",\n {\"fields\": [\"course\", \"module\", \"question\", \"response\"]})\n ]\n\n\nclass MessageAdmin(SummernoteModelAdmin):\n list_display = (\"name\", \"course\", \"author\", \"direction\", \"publishdate\")\n list_filter = (\"course\", \"direction\")\n search_fields = (\"name\", \"author\")\n fieldsets = [\n (None,\n {\"fields\": [\"name\", \"course\", \"author\", \"direction\",\n \"publishdate\"]}),\n (\"Content\",\n {\"fields\": [\"content\"]})\n ]\n\n\nclass SmsAdmin(SummernoteModelAdmin):\n list_display = (\"msisdn\", \"date_sent\", \"message\")\n\n\n# Communication\nadmin.site.register(Sms, SmsAdmin)\nadmin.site.register(Post, PostAdmin)\nadmin.site.register(Message, MessageAdmin)\nadmin.site.register(ChatGroup, ChatGroupAdmin)\nadmin.site.register(Discussion, DiscussionAdmin)\n","sub_path":"communication/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"567538762","text":"from student import Student\nimport time\n\ndef decorator(delegate):\n def wrapper(*args):\n print('Before delegate')\n result = delegate(*args)\n print('After delegate')\n return result\n return wrapper;\n\ndef profile(unit = 'ns'):\n def inner(delegate):\n def wrapper(*args):\n before = time.time_ns()\n result = delegate(*args)\n after = time.time_ns()\n exec_time = after - before\n exec_time_unit = 'ns'\n if(unit == 'ms'):\n exec_time /= 1000000\n exec_time_unit = 'ms'\n print(f'Function \\'{delegate.__name__}\\' executed for: {exec_time} {exec_time_unit}')\n return result\n return wrapper\n return inner\n\n@decorator\n@profile('ms')\ndef print_name(name):\n sum = 0\n for i in range(1, 100000):\n sum += i\n print(f'Name: {name}')\n return True\n\n@profile('ms')\n@decorator\ndef print_courses(student):\n sum = 0\n for i in range(1,1000000):\n sum += i\n print(student.courses)\n return len(student.courses)\n\nif __name__ == '__main__':\n res = print_name('Python')\n print(res)\n num_courses = print_courses(Student('7009122345', 'Dimitar', 'Georgiev', 'Plovdiv', '+359889675432',\n ['Algebra', 'SDP', 'Calculus', 'Internet Programming']),)\n print(num_courses)","sub_path":"06-sdp-oop/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"}
+{"seq_id":"636575504","text":"#!/usr/bin/env python\nimport inspect\nimport logging\nimport os\nimport threading\nimport zipfile\nfrom math import atan, cos, exp, log, pi, radians, sin, tan\nfrom Queue import Queue\n\nimport mapnik\nfrom config import CONFIG\nfrom osgeo import ogr, osr\n\nDEG_TO_RAD = pi / 180\nRAD_TO_DEG = 180 / pi\nSUPERDIR = os.path.dirname(\n os.path.abspath(inspect.getfile(inspect.currentframe()))\n) # script directory\nONE = os.path.join(SUPERDIR, \"assets\", \"1.png\")\nTWO = os.path.join(SUPERDIR, \"assets\", \"2.png\")\nTHREE = os.path.join(SUPERDIR, \"assets\", \"3.png\")\nFOUR = os.path.join(SUPERDIR, \"assets\", \"4.png\")\nFIVE = os.path.join(SUPERDIR, \"assets\", \"5.png\")\nONE_BIG = os.path.join(SUPERDIR, \"assets\", \"1_big.png\")\nTWO_BIG = os.path.join(SUPERDIR, \"assets\", \"2_big.png\")\nTHREE_BIG = os.path.join(SUPERDIR, \"assets\", \"3_big.png\")\nFOUR_BIG = os.path.join(SUPERDIR, \"assets\", \"4_big.png\")\nFIVE_BIG = os.path.join(SUPERDIR, \"assets\", \"5_big.png\")\n# Default number of rendering threads to spawn, should be roughly equal to number of CPU cores available\nNUM_THREADS = CONFIG.TILE_RASTER_MAX_THREADS\nSIZE_X = 256\nSIZE_Y = 256\n\n\ndef deg2num(lat_deg, lon_deg, zoom):\n lat_rad = radians(lat_deg)\n n = 2.0 ** zoom\n xtile = int((lon_deg + 180.0) / 360.0 * n)\n ytile = int((1.0 - log(tan(lat_rad) + (1 / cos(lat_rad))) / pi) / 2.0 * n)\n return (xtile, ytile, zoom)\n\n\ndef tiles4BBox(domain, levels):\n tiles = {}\n for level in levels:\n tileset = []\n tile_min = deg2num(domain[3], domain[0], level)\n tile_max = deg2num(domain[1], domain[2], level)\n logging.debug(\"tile min:\\t{0}\\tmax:\\t{1}\".format(tile_min, tile_max))\n for x in range(tile_min[0], tile_max[0] + 1):\n for y in range(tile_min[1], tile_max[1] + 1):\n tile = (x, y, level)\n tileset.append(tile)\n tiles[str(level)] = tileset\n return tiles\n\n\ndef minmax(a, b, c):\n a = max(a, b)\n a = min(a, c)\n return a\n\n\ndef zipdir(path, zip):\n for root, dirs, files in os.walk(path):\n for file in files:\n f = os.path.join(root, file).replace(path, \"\")\n zip.write(os.path.join(root, file), arcname=f)\n\n\ndef getLayerInfo(shp_file):\n layer_info = {\"min_speed\": -1, \"max_speed\": -1, \"extents\": {}, \"native_wkid\": \"\"}\n\n driver = ogr.GetDriverByName(\"ESRI Shapefile\")\n data_source = driver.Open(shp_file, 0)\n layer = data_source.GetLayer()\n\n # get the extent and transform\n native_srs = layer.GetSpatialRef()\n native_extent = layer.GetExtent()\n layer_info[\"native_wkid\"] = native_srs.GetAuthorityCode(None)\n layer_info[\"extents\"][native_srs.GetAuthorityCode(None)] = {\n \"xmin\": native_extent[0],\n \"ymin\": native_extent[2],\n \"xmax\": native_extent[1],\n \"ymax\": native_extent[3],\n \"proj4string\": native_srs.ExportToProj4(),\n }\n\n # Lon/Lat WGS84\n multipoint = ogr.Geometry(ogr.wkbMultiPoint)\n point1 = ogr.Geometry(ogr.wkbPoint)\n point1.AddPoint(native_extent[0], native_extent[2])\n multipoint.AddGeometry(point1)\n point2 = ogr.Geometry(ogr.wkbPoint)\n point2.AddPoint(native_extent[1], native_extent[3])\n multipoint.AddGeometry(point2)\n\n target_srs = osr.SpatialReference()\n target_srs.ImportFromEPSG(4326)\n transform = osr.CoordinateTransformation(native_srs, target_srs)\n multipoint.Transform(transform)\n\n layer_info[\"extents\"][\"4326\"] = {\n \"xmin\": multipoint.GetGeometryRef(0).GetPoint(0)[0],\n \"ymin\": multipoint.GetGeometryRef(0).GetPoint(0)[1],\n \"xmax\": multipoint.GetGeometryRef(1).GetPoint(0)[0],\n \"ymax\": 
multipoint.GetGeometryRef(1).GetPoint(0)[1],\n \"proj4string\": target_srs.ExportToProj4(),\n }\n\n # WebMercator 3857\n target_srs = osr.SpatialReference()\n target_srs.ImportFromEPSG(3857)\n input_srs = osr.SpatialReference()\n input_srs.ImportFromEPSG(4326)\n transform = osr.CoordinateTransformation(input_srs, target_srs)\n multipoint.Transform(transform)\n layer_info[\"extents\"][\"3857\"] = {\n \"xmin\": multipoint.GetGeometryRef(0).GetPoint(0)[0],\n \"ymin\": multipoint.GetGeometryRef(0).GetPoint(0)[1],\n \"xmax\": multipoint.GetGeometryRef(1).GetPoint(0)[0],\n \"ymax\": multipoint.GetGeometryRef(1).GetPoint(0)[1],\n \"proj4string\": target_srs.ExportToProj4(),\n }\n\n # Get the min/max wind speed\n sql = 'SELECT MIN(speed), MAX(speed) FROM \"{0}\"'.format(layer.GetName())\n query = data_source.ExecuteSQL(sql)\n feature = query.GetFeature(0)\n layer_info[\"min_speed\"] = feature.GetField(\"MIN_speed\")\n layer_info[\"max_speed\"] = feature.GetField(\"MAX_speed\")\n\n return layer_info\n\n\nclass GoogleProjection:\n def __init__(self, levels=18):\n self.Bc = []\n self.Cc = []\n self.zc = []\n self.Ac = []\n c = SIZE_X\n for d in range(0, levels):\n e = c / 2\n self.Bc.append(c / 360.0)\n self.Cc.append(c / (2 * pi))\n self.zc.append((e, e))\n self.Ac.append(c)\n c *= 2\n\n def fromLLtoPixel(self, ll, zoom):\n d = self.zc[zoom]\n e = round(d[0] + ll[0] * self.Bc[zoom])\n f = minmax(sin(DEG_TO_RAD * ll[1]), -0.9999, 0.9999)\n g = round(d[1] + 0.5 * log((1 + f) / (1 - f)) * -self.Cc[zoom])\n return (e, g)\n\n def fromPixelToLL(self, px, zoom):\n e = self.zc[zoom]\n f = (px[0] - e[0]) / self.Bc[zoom]\n g = (px[1] - e[1]) / -self.Cc[zoom]\n h = RAD_TO_DEG * (2 * atan(exp(g)) - 0.5 * pi)\n return (f, h)\n\n\nclass RenderThread:\n def __init__(\n self, tile_dir, data_file, proj4string, max_speed, q, printLock, maxZoom\n ):\n self.tile_dir = tile_dir\n self.q = q\n self.m = mapnik.Map(SIZE_X, SIZE_Y)\n\n one = max_speed / 5 # 6\n two = str(one * 2) # 8\n three = str(one * 3) # 10\n four = str(4 * one) # 12\n one = str(one)\n\n self.printLock = printLock\n # Load style XML\n\n # NOTE:\n # Buffer_size = 2048 prevents cutting off arrow markers during draw.\n # Draws entire map then cuts the image into tiles.\n xml = \"\"\"\n \n \"\"\".format(\n data=data_file,\n proj=proj4string,\n low=one,\n med=two,\n high=three,\n v_high=four,\n blue=ONE,\n green=TWO,\n yellow=THREE,\n orange=FOUR,\n red=FIVE,\n )\n\n logging.debug(xml)\n\n mapnik.load_map_from_string(self.m, xml, True)\n # mapnik.load_map(self.m, mapfile, True)\n\n # Obtain