diff --git "a/5439.jsonl" "b/5439.jsonl" new file mode 100644--- /dev/null +++ "b/5439.jsonl" @@ -0,0 +1,306 @@ +{"seq_id":"71808507254","text":"### partially from BICEP team\n### def bandpower window function \n\nimport camb\nimport numpy as np\nfrom numpy import linalg as LA\nfrom scipy.linalg import sqrtm\n\n# def GenBB(r = 0.05, raw_cl = True):\n# '''\n# Generate the theoretical power spectra using camb\n# '''\n \n# pars = camb.CAMBparams()\n# pars.set_cosmology(H0=67.26, ombh2=0.022, omch2=0.1199, mnu=0.06, omk=0, tau=0.078)\n# pars.InitPower.set_params(As=2.19856*1e-9, ns=0.9652, r = r)\n# pars.set_for_lmax(3000, lens_potential_accuracy=1)\n# pars.WantTensors = True\n \n# results = camb.get_results(pars)\n# powers =results.get_cmb_power_spectra(pars, CMB_unit='muK', raw_cl=raw_cl)\n \n# totCL=powers['tensor'] ## TT EE BB TE\n# # ell = np.arange(len(totCL.T[0]))\n# return totCL.T[2]\n\n\ndef Gencl(r = 0.05, raw_cl = True, tensorBB_only = False):\n '''\n Generate the theoretical power spectra using camb\n '''\n \n pars = camb.CAMBparams()\n pars.set_cosmology(H0=67.26, ombh2=0.022, omch2=0.1199, mnu=0.06, omk=0, tau=0.078)\n pars.InitPower.set_params(As=2.19856*1e-9, ns=0.9652, r = r)\n pars.set_for_lmax(3000, lens_potential_accuracy=1)\n pars.WantTensors = True\n \n results = camb.get_results(pars)\n powers =results.get_cmb_power_spectra(pars, CMB_unit='muK', raw_cl=raw_cl)\n \n if tensorBB_only:\n totCL=powers['tensor'] ## TT EE BB TE\n \n return totCL.T[2]\n \n else: \n \n totCL=powers['total'] ## TT EE BB TE\n\n return totCL.T\n \n\ndef l2(ell):\n '''\n get the l^2/np.pi\n '''\n \n return ell*(ell+1)/2/np.pi\n\ndef calc_vecp(l,C_l_hat,C_fl, C_l):\n\n C_fl_12 = sqrtm(C_fl[l])\n C_l_inv = LA.pinv(C_l[l])\n C_l_inv_12= sqrtm(C_l_inv)\n # the order is inverted compared to matlab hamimeche_lewis_likelihood.m line 19\n \n # line 20 of hamimeche_lewis_likelihood.m\n res = np.dot(C_l_inv_12, np.dot(C_l_hat[l], C_l_inv_12))\n\n [d, u] = LA.eigh(res)\n d = np.diag(d) # noticed that python returns the eigenvalues as a vector, not a matrix\n #np. 
dot( u, np.dot( np.diag(d), LA.inv(u))) should be equals to res\n # real symmetric matrices are diagnalized by orthogonal matrices (M^t M = 1) \n\n # this makes a diagonal matrix by applying g(x) to the eigenvalues, equation 10 in Barkats et al\n gd = np.sign(np.diag(d) - 1) * np.sqrt(2 * (np.diag(d) - np.log(np.diag(d)) - 1))\n gd = np.diag(gd);\n # Argument of vecp in equation 8; multiplying from right to left \n X = np.dot(np.transpose(u), C_fl_12)\n X = np.dot(gd, X)\n X = np.dot(u, X)\n X = np.dot(C_fl_12, X)\n # This is the vector of equation 7 \n X = vecp(X)\n\n return (X)\n\ndef vecp(mat):\n '''\n This returns the unique elements of a symmetric matrix \n '''\n\n dim = mat.shape[0]\n \n vec = np.zeros(int(dim*(dim+1)/2))\n counter = 0\n for iDiag in range(0,dim):\n vec[counter:counter+dim-iDiag] = np.diag(mat,iDiag)\n \n counter = counter + dim - iDiag\n\n return vec\n\n\ndef Marray_EEfirst(cl_f_all, nf_ind, Nmode,lbin, SamNum):\n '''\n Get the re-arranged array for Mcc'.\n -------------------------------------------\n Input\n \n cl_f_all, (SamNum, Nmode, lbin, nf, nf)\n \n -------------------------------------------\n Output\n \n marray, (Nmode*lbin*nf_ind , SamNum) for EE first.\n '''\n marray = np.zeros(((Nmode*lbin*nf_ind), SamNum)) # mode(EE, EB, BB), l-bin, nf independent corr between frequencies \n for n in range(SamNum):\n\n for mode in range(Nmode): \n\n cl_flat = np.zeros((lbin, nf_ind)) ## collect independent corr for each l-bin\n\n for ell in range(lbin):\n cl_flat[ell] = vecp(cl_f_all[n][mode][ell])# - nl_mean[2][ell] ) ##########!!!!!!!!!!!!!!!!!! need to subtract noise?? 06.27\n\n marray[mode*lbin*nf_ind:(mode+1)*lbin*nf_ind,n] = cl_flat.flatten()\n \n return marray\n\ndef Minv(M, lbin, nf_ind):\n '''\n Get re-organized M_inv for the calculation of Likelihood.\n M: (lbin*nf_ind, lbin*nf_ind), just BB mode for mow. 2020/06/29\n \n Output:\n M_inv, (lbin, lbin, nf_ind, nf_ind)\n '''\n cov_mat_inv = LA.inv(M)\n _M_inv = np.ones((lbin, lbin, nf_ind, nf_ind))\n\n for l in range(lbin):\n\n for lp in range(lbin):\n\n _M_inv[l,lp, :, :] = cov_mat_inv[l*nf_ind:(l+1)*nf_ind, lp*nf_ind:(lp+1)*nf_ind]\n \n return _M_inv\n\n\ndef evaluateLikelihood(C_l,C_l_hat,C_fl,M_inv, sbin = 0):\n '''\n To evaluate the likelihood itself.\n \n ------------------------------------------\n Input\n \n sbin: start-bin number\n '''\n logL = 0; lbin = C_l.shape[0]\n # Calculate X vector (Eq 8) for each l, lp\n for l in range(sbin, lbin):\n X = calc_vecp(l,C_l_hat,C_fl,C_l)\n \n for lp in range(sbin, lbin):\n Xp = calc_vecp(lp,C_l_hat,C_fl,C_l)\n M_inv_pp = M_inv[l,lp,:,:]\n # calculate loglikelihood (Eq 7)\n thislogL = (-0.5)*np.dot(X,np.dot(M_inv_pp,Xp))\n logL = logL + thislogL\n\n if np.isnan(logL):\n logL = -1e20\n\n logL = np.real(logL)\n return logL\n\n\ndef calc_vecp_test(cl_hat,cl_f, cl_th, Nf, Nmodes = None):\n '''\n Input\n ---------------------------\n Cl : (lbin, Nf, Nf);\n Nf : number of frequency channels;\n Nmodes: consider different modes like EE EB and BB; Only BB for now. 
2020.07.04\n \n Output\n ---------------------------\n Xall : rearanged to one line, as like lbin first, then nf_ind \n '''\n lbin = len(cl_hat); nf_ind = int(Nf*(Nf+1)/2)\n \n Xall = np.ones(lbin*nf_ind) \n for l in range(lbin):\n \n cl_f_12 = sqrtm(cl_f[l])\n cl_inv = LA.pinv(cl_th[l])\n cl_inv_12= sqrtm(cl_inv)\n \n res = np.dot(cl_inv_12, np.dot(cl_hat[l], cl_inv_12))\n\n [d, u] = LA.eigh(res)\n\n # this makes a diagonal matrix by applying g(x) to the eigenvalues, equation 10 in Barkats et al\n gd = np.sign(d - 1) * np.sqrt(2 * (d - np.log(d) - 1))\n gd = np.diag(gd);\n # Argument of vecp in equation 8; multiplying from right to left \n X = np.dot(np.transpose(u), cl_f_12)\n X = np.dot(gd, X)\n X = np.dot(u, X)\n X = np.dot(cl_f_12, X)\n # This is the vector of equation 7 \n Xall[l*nf_ind:(l+1)*nf_ind] = vecp_jx(X)\n\n return (Xall)\n\ndef testL(cl_hat,cl_f, cl_th, Nf, M_inv, Nmodes = None, sbin = None, ebin = None):\n \n '''\n Input\n ------------------------------\n \n cl_hat, lbin*Nf*Nf\n cl_f, lbin*Nf*Nf\n cl_th, Nf, M, Nmodes = None, sbin = None, ebin = None\n \n M: covariance of all X arrays, reordered to be a line for each Xall...\n '''\n \n Xa = (calc_vecp_test(cl_hat, cl_f,cl_th, Nf = Nf))\n \n# M_inv = LA.inv(M);\n \n if sbin is not None:\n \n nf_ind = int(Nf*(Nf+1)/2)\n start = sbin*nf_ind; end = ebin*nf_ind;\n \n Xa = Xa[start: end]; \n M_inv = M_inv[start:end,start:end]\n# print(Xa.shape); \n# print(M_inv.shape);\n \n Xa = np.matrix(Xa);\n logL = -0.5*Xa*M_inv*np.transpose(Xa) ## 1*1 matrix, use logL[0,0] to extract number\n \n if np.isnan(logL[0,0]):\n logL[0,0] = -1e30\n \n return (logL[0,0])\n\n\n##################################################\n############# Gauss Likelihood #################\n\ndef vecp_jx(mat):\n y = np.triu(mat, k = 0)\n vecp_y = y[y != 0]\n return vecp_y\n\ndef calculate_vecp_ga(cl_hat, Nf, vecp_f = 'vecp'):\n \n '''\n Turn the matrix into X-form. 
For Gaussian-Likelihood.\n '''\n if vecp_f == 'vecp':\n vecp_in = vecp;\n else:\n vecp_in = vecp_jx;\n \n lbin = len(cl_hat); nf_ind = int(Nf*(Nf+1)/2);\n Xall = np.ones(lbin*nf_ind);\n for l in range(lbin):\n Xall[l*nf_ind:(l+1)*nf_ind] = vecp_in(cl_hat[l])\n \n return(Xall)\n\n\n## -2lnL = (C_hat - C)*M^-1*(C_hat - C ^T)\ndef simple_likelihood(cl_hat,cl_th, Nf, M_inv, sbin = None, ebin = None):\n \n Xa = calculate_vecp_ga(cl_hat - cl_th, Nf);\n# M_inv = LA.inv(M);\n\n if sbin is not None:\n \n nf_ind = int(Nf*(Nf+1)/2)\n start = sbin*nf_ind; end = ebin*nf_ind;\n \n Xa = Xa[start: end]; \n M_inv = M_inv[start:end,start:end]\n \n Xa = np.matrix(Xa);\n \n logL = -0.5*Xa*M_inv*np.transpose(Xa)\n \n return(logL[0,0])\n","repo_name":"yaojian95/Likelihood","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19739976201","text":"l = []\nn = int(input(\"Enter number of pages : \"))\nf = int(input(\"Enter number of frames : \"))\nfor i in range(n):\n l.append(int(input(f\"Enter page {i+1} : \")))\npages = []\nfault = 0\nfor i in range(n):\n if len(pages)= 145852 and i <= 616942:\n total_combinations += 1\n\n print('Total Combinations (Part1):', total_combinations)\n\n total_combinations = 0\n for i in range(999999):\n string = \"{:06d}\".format(i)\n\n if contains_2_adjacent_digits_strict(string) and digits_same_or_increase(string) and i >= 145852 and i <= 616942:\n total_combinations += 1\n print('Total Combinations (Part2):', total_combinations)\n\n","repo_name":"jenkins957/advent-of-code","sub_path":"advent_of_code_2019/day04_secure_container.py","file_name":"day04_secure_container.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"26362705087","text":"from string import ascii_uppercase as letters\n\ndef convert(n, base):\n number = []\n while n:\n temp = n % base\n temp = letters[temp - 10] if (temp >= 10) else temp\n number.append(str(temp))\n n = n // base\n return ''.join(number[::-1])\n\nN = int(input(\"Enter the number: \"))\nB = int(input(\"Enter the base: \"))\nprint(f\"Converted Number: {convert(N, B)}\")","repo_name":"J16N/python-lab","sub_path":"Assignment-2/p7.py","file_name":"p7.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"28003303987","text":"from __future__ import absolute_import, print_function\n\nimport re\n\nfrom flask_babelex import lazy_gettext as _\n\nusername_regex = re.compile('^[a-zA-Z][a-zA-Z0-9-_]{2}[a-zA-Z0-9-_]*$')\n\"\"\"Username rules.\"\"\"\n\nUSERNAME_RULES = _(\n 'Username must start with a letter, be at least three characters long and'\n ' only contain alphanumeric characters, dashes and underscores.')\n\"\"\"Description of username validation rules.\n\n.. 
note:: Used for both form help text and for form validation error.\"\"\"\n\n\ndef validate_username(username):\n \"\"\"Validate the username.\n\n See :data:`invenio_userprofiles.validators.username_regex` to know which\n rules are applied.\n\n :param username: The user name.\n :raises ValueError: If validation fails.\n \"\"\"\n if not username_regex.match(username):\n raise ValueError(USERNAME_RULES)\n","repo_name":"N03/invenio","sub_path":".virtualenvs/invenio/lib/python2.7/site-packages/invenio_userprofiles/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74068547573","text":"import setuptools\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"coffeece\",\n version=\"1.1.0\",\n author=\"FARBEX97\",\n author_email=\"fernandoarbexcv@gmail.com\",\n description=\"Python useful classes for office work automation\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/FARBEX97/coffeece\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.6',\n)","repo_name":"FARBEX97/coffeece","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"2982500233","text":"class Solution:\n # @param {string} s\n # @return {integer}\n def lengthOfLastWord(self, s):\n length = 0\n meet_char = False\n for c in s[::-1]:\n if c != ' ':\n meet_char = True\n length = length + 1\n elif meet_char != False:\n break\n return length\n","repo_name":"tobygameac/Problem-Solving","sub_path":"LeetCode/058 Length of Last Word.py","file_name":"058 Length of Last Word.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72358131572","text":"import unittest\n\nfrom roundup.support import wrap\n\nclass WrapTestCase(unittest.TestCase):\n def testWrap(self):\n lorem = '''Lorem ipsum dolor sit amet, consectetuer adipiscing elit.'''\n wrapped = '''Lorem ipsum dolor\nsit amet,\nconsectetuer\nadipiscing elit.'''\n self.assertEquals(wrap(lorem, 20), wrapped)\n","repo_name":"jerrykan/herder","sub_path":"test/test_textfmt.py","file_name":"test_textfmt.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43342213821","text":"import sys\nimport uuid\nfrom cassandra.cqlengine import columns\nfrom django_cassandra_engine.models import DjangoCassandraModel, DjangoCassandraModelMetaClass\nfrom django.urls import reverse\n\n\nclass ConsistencyForMultipleModels:\n def save(self):\n \"\"\"\n Сохранение сущности в связанных колоночных семействах\n \"\"\"\n class_list = (getattr(sys.modules[__name__], model) for model in self.models_list)\n obj = super(DjangoCassandraModel, self).save()\n obj_kwargs = {item[0]: item[1] for item in self.items()}\n for model_class in class_list:\n if self.__class__ != model_class:\n sub_obj = model_class(**obj_kwargs)\n super(DjangoCassandraModel, sub_obj).save()\n return obj\n\n\nclass CategoryByUrl(DjangoCassandraModel):\n \"\"\"\n Модель колоночного 
семейства для запроса категории по id\n \"\"\"\n class Meta:\n get_pk_field = 'url'\n url = columns.Text(primary_key=True)\n name = columns.Text()\n\n def __str__(self):\n return self.name\n\n def get_absolute_url(self):\n return reverse('category', args=[self.url])\n\n\nclass Product(ConsistencyForMultipleModels, DjangoCassandraModel):\n \"\"\"\n Абстрактная модель сущности товара\n \"\"\"\n __abstract__ = True\n models_list = ('ProductByUrl', 'Products', 'ProductsSortedByRating')\n price = columns.Double()\n number_of_ratings = columns.Integer(default=0)\n title = columns.Text()\n description = columns.Text()\n\n def get_absolute_url(self):\n return reverse('product-detail', args=[self.cat_url, self.url])\n\n\nclass ProductByUrl(Product):\n \"\"\"\n Модель колоночного семейства для запроса товара по url\n \"\"\"\n class Meta:\n get_pk_field = 'cat_url'\n cat_url = columns.Text(primary_key=True)\n url = columns.Text(primary_key=True, clustering_order=\"ASC\")\n id = columns.TimeUUID()\n rating = columns.Double(default=0)\n\n\nclass Products(Product):\n \"\"\"\n Модель колоночного семейства для запроса товара по id\n \"\"\"\n class Meta:\n get_pk_field = 'cat_url'\n cat_url = columns.Text(primary_key=True)\n id = columns.TimeUUID(primary_key=True, clustering_order=\"ASC\")\n number_of_ratings = columns.Integer(default=0)\n rating = columns.Double(default=0)\n\n\nclass ProductsSortedByRating(Product):\n \"\"\"\n Модель колоночного семейства для запроса отсортированных товаров по рейтингу\n \"\"\"\n class Meta:\n get_pk_field = 'cat_url'\n cat_url = columns.Text(primary_key=True)\n rating = columns.Double(primary_key=True, clustering_order=\"DESC\", default=0)\n id = columns.TimeUUID(primary_key=True, clustering_order=\"DESC\")\n url = columns.Text()\n\n\nclass User(ConsistencyForMultipleModels, DjangoCassandraModel):\n \"\"\"\n Абстрактная модель сущности пользователя\n \"\"\"\n __abstract__ = True\n models_list = ('UserByEmail', 'UserById')\n password = columns.Text()\n\n # def get_absolute_url(self):\n # return reverse('product-detail', args=[self.cat_url, self.url])\n\n\nclass UserByEmail(User):\n \"\"\"\n Модель колоночного семейства для запроса пользователя по email\n \"\"\"\n class Meta:\n get_pk_field = 'email'\n email = columns.Text(primary_key=True)\n id = columns.TimeUUID()\n\n\nclass UserById(User):\n \"\"\"\n Модель колоночного семейства для запроса пользователя по id\n \"\"\"\n class Meta:\n get_pk_field = 'user_id'\n id = columns.TimeUUID(primary_key=True, default=uuid.uuid1)\n email = columns.Text()\n\n\nclass ReviewsByProduct(DjangoCassandraModel):\n \"\"\"\n Модель колоночного семейства для запроса отзывов по id товара\n \"\"\"\n class Meta:\n get_pk_field = 'product_id'\n product_id = columns.TimeUUID(primary_key=True)\n review_id = columns.TimeUUID(primary_key=True, clustering_order=\"DESC\")\n user_id = columns.TimeUUID()\n date = columns.DateTime()\n text = columns.Text()\n mark = columns.Integer()\n\n\nclass ReviewsByUser(DjangoCassandraModel):\n \"\"\"\n Модель колоночного семейства для запроса отзывов по id пользователя\n \"\"\"\n class Meta:\n get_pk_field = 'user_id'\n user_id = columns.TimeUUID(primary_key=True)\n review_id = columns.TimeUUID(primary_key=True, clustering_order=\"DESC\")\n product_id = columns.TimeUUID()\n text = columns.Text()\n mark = columns.Integer()\n date = columns.DateTime()\n\n\nclass ItemsByOrder(DjangoCassandraModel):\n \"\"\"\n Модель колоночного семейства для запроса товаров по id заказа\n \"\"\"\n class Meta:\n get_pk_field = 
'order_id'\n order_id = columns.TimeUUID(primary_key=True)\n product_id = columns.TimeUUID(primary_key=True, clustering_order=\"ASC\")\n count = columns.Integer()\n sum = columns.Double()\n\n\nclass OrdersByUser(DjangoCassandraModel):\n \"\"\"\n Модель колоночного семейства для запроса заказов по id пользователя\n \"\"\"\n class Meta:\n get_pk_field = 'user_id'\n user_id = columns.TimeUUID(primary_key=True)\n order_id = columns.TimeUUID(primary_key=True, clustering_order=\"DESC\")\n count = columns.Integer()\n sum = columns.Double()\n\n\nclass CartByUser(DjangoCassandraModel):\n \"\"\"\n Модель колоночного семейства для запроса товаров в корзине по id пользователя\n \"\"\"\n class Meta:\n get_pk_field = 'user_id'\n user_id = columns.TimeUUID(primary_key=True)\n product_id = columns.TimeUUID(primary_key=True, clustering_order=\"ASC\")\n count = columns.Counter()\n","repo_name":"kovarden/cassandra-shop-website","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6117,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4351123900","text":"import faulthandler; faulthandler.enable()\n\nfrom collections import defaultdict\nfrom datetime import datetime\nimport gzip\nimport hashlib\nimport io\nimport json\nimport logging\nimport os\nfrom pathlib import Path\nimport pytz\nimport shutil\nimport socket\nimport sys\nimport traceback\n\nimport boto3\nimport botocore\nfrom github.GithubException import GithubException\nfrom PIL import Image, ImageOps\nimport requests\nimport rdflib\nfrom rdflib import URIRef, Literal\nfrom rdflib.namespace import Namespace, NamespaceManager\nfrom openpecha.catalog import CatalogManager\nfrom openpecha.github_utils import delete_repo\n\nfrom ocr.google_ocr import get_text_from_image\nfrom ocr.slack_notifier import slack_notifier\n\n\n#Host config\nHOSTNAME = socket.gethostname()\n\n# S3 config\nos.environ['AWS_SHARED_CREDENTIALS_FILE'] = \"~/.aws/credentials\"\nARCHIVE_BUCKET = \"archive.tbrc.org\"\nOCR_OUTPUT_BUCKET = \"ocr.bdrc.io\"\nS3 = boto3.resource('s3')\nS3_client = boto3.client('s3')\narchive_bucket = S3.Bucket(ARCHIVE_BUCKET)\nocr_output_bucket = S3.Bucket(OCR_OUTPUT_BUCKET)\n\n# URI config\nBDR = Namespace(\"http://purl.bdrc.io/resource/\")\nNSM = NamespaceManager(rdflib.Graph())\nNSM.bind(\"bdr\", BDR)\n\n# s3 bucket directory config\nSERVICE = \"vision\"\nBATCH_PREFIX = 'batch'\nIMAGES = 'images'\nOUTPUT = 'output'\nINFO_FN = 'info.json'\n\n# local directory config\nDATA_PATH = Path('./archive')\nIMAGES_BASE_DIR = DATA_PATH/IMAGES\nOCR_BASE_DIR = DATA_PATH/OUTPUT\nCHECK_POINT_FN = DATA_PATH/'checkpoint.json'\n\n# Checkpoint config\nCHECK_POINT = defaultdict(list)\nCOLLECTION = 'collection'\nWORK = 'work'\nVOL = 'imagegroup'\nlast_work = None\nlast_vol = None\n\n# notifier config\nnotifier = slack_notifier\n\n# openpecha opf setup\ncatalog = CatalogManager(\n formatter_type='ocr',\n last_id_fn=f'{HOSTNAME}_last_id'\n)\n\n# logging config\nlogging.basicConfig(\n filename='bdrc_ocr.log',\n format='%(asctime)s, %(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S %p',\n level=logging.INFO\n)\n\n# Debug config\nDEBUG = {\n 'status': False\n}\n\n\ndef get_value(json_node):\n if json_node['type'] == 'literal':\n return json_node['value']\n else:\n return NSM.qname(URIRef(json_node[\"value\"]))\n\n\ndef get_s3_image_list(volume_prefix_url):\n \"\"\"\n returns the content of the dimension.json file for a volume ID, accessible at:\n 
https://iiifpres.bdrc.io/il/v:bdr:V22084_I0888 for volume ID bdr:V22084_I0888\n \"\"\"\n r = requests.get(f'https://iiifpres.bdrc.io/il/v:{volume_prefix_url}')\n if r.status_code != 200:\n logging.error(f\"Volume Images list Error: No images found for volume {volume_prefix_url}: status code: {r.status_code}\")\n return {}\n return r.json()\n\n\ndef get_volume_infos(work_prefix_url):\n \"\"\"\n the input is something like bdr:W22084, the output is a list like:\n [ \n {\n \"vol_num\": 1,\n \"volume_prefix_url\": \"bdr:V22084_I0886\",\n \"imagegroup\": \"I0886\"\n },\n ...\n ]\n \"\"\"\n r = requests.get(f'http://purl.bdrc.io/query/table/volumesForWork?R_RES={work_prefix_url}&format=json&pageSize=400')\n if r.status_code != 200:\n logging.error(f\"Volume Info Error: No info found for Work {work_prefix_url}: status code: {r.status_code}\")\n return\n # the result of the query is already in ascending volume order\n res = r.json()\n for b in res[\"results\"][\"bindings\"]:\n volume_prefix_url = NSM.qname(URIRef(b[\"volid\"][\"value\"]))\n yield {\n \"vol_num\": get_value(b[\"volnum\"]), \n \"volume_prefix_url\": get_value(b[\"volid\"]),\n \"imagegroup\": get_value(b[\"imggroup\"])\n }\n\n\ndef get_s3_prefix_path(work_local_id, imagegroup, service=None, batch_prefix=None, data_types=None):\n \"\"\"\n the input is like W22084, I0886. The output is an s3 prefix (\"folder\"), the function\n can be inspired from \n https://github.com/buda-base/volume-manifest-tool/blob/f8b495d908b8de66ef78665f1375f9fed13f6b9c/manifestforwork.py#L94\n which is documented\n \"\"\"\n md5 = hashlib.md5(str.encode(work_local_id))\n two = md5.hexdigest()[:2]\n\n pre, rest = imagegroup[0], imagegroup[1:]\n if pre == 'I' and rest.isdigit() and len(rest) == 4:\n suffix = rest\n else:\n suffix = imagegroup\n \n base_dir = f'Works/{two}/{work_local_id}'\n if service:\n batch_dir = f'{base_dir}/{service}/{batch_prefix}001'\n paths = {BATCH_PREFIX: batch_dir}\n for dt in data_types:\n paths[dt] = f'{batch_dir}/{dt}/{work_local_id}-{suffix}'\n return paths\n return f'{base_dir}/images/{work_local_id}-{suffix}'\n\n\ndef get_s3_bits(s3path):\n \"\"\"\n get the s3 binary data in memory\n \"\"\"\n f = io.BytesIO()\n try:\n archive_bucket.download_fileobj(s3path, f)\n return f\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == '404':\n logging.error(f'The object does not exist, {s3path}')\n else:\n raise\n return\n\n\ndef save_file(bits, origfilename, imagegroup_output_dir):\n \"\"\"\n uses pillow to interpret the bits as an image and save as a format\n that is appropriate for Google Vision (png instead of tiff for instance).\n This may also apply some automatic treatment\n \"\"\"\n imagegroup_output_dir.mkdir(exist_ok=True, parents=True)\n output_fn = imagegroup_output_dir/origfilename\n if origfilename.endswith('.tif'):\n output_fn = imagegroup_output_dir/f'{origfilename.split(\".\")[0]}.png'\n if output_fn.is_file(): return\n try:\n img = Image.open(bits)\n if len(img.size) > 2:\n img = ImageOps.autocontrast(img, cutoff=0.5)\n except:\n if not bits.getvalue():\n logging.error(f'Empty bytes: {output_fn}')\n else:\n logging.error(f'Pillow issue: {output_fn}')\n return\n\n try:\n img.save(str(output_fn))\n except:\n logging.error(f'Error in saving: {output_fn} : origfilename: {origfilename}')\n return\n\n\ndef image_exists_locally(origfilename, imagegroup_output_dir):\n if origfilename.endswith('.tif'):\n output_fn = imagegroup_output_dir/f'{origfilename.split(\".\")[0]}.png'\n if output_fn.is_file(): 
return True\n else:\n output_fn = imagegroup_output_dir/origfilename\n if output_fn.is_file(): return True\n\n # ocr output is processed\n path_parts = list(imagegroup_output_dir.parts)\n path_parts[1] = OUTPUT\n output_fn = Path('/'.join(path_parts))/f'{origfilename.split(\".\")[0]}.json.gz'\n if output_fn.is_file(): return True\n\n return False\n\n\ndef save_images_for_vol(volume_prefix_url, work_local_id, imagegroup, images_base_dir):\n \"\"\"\n this function gets the list of images of a volume and download all the images from s3.\n The output directory is output_base_dir/work_local_id/imagegroup\n \"\"\"\n s3prefix = get_s3_prefix_path(work_local_id, imagegroup)\n for imageinfo in get_s3_image_list(volume_prefix_url):\n if DEBUG['status'] and not imageinfo['filename'].split('.')[0] == 'I1KG35630002': continue\n imagegroup_output_dir = images_base_dir/work_local_id/imagegroup\n if image_exists_locally(imageinfo['filename'], imagegroup_output_dir): continue\n s3path = s3prefix+\"/\"+imageinfo['filename']\n filebits = get_s3_bits(s3path)\n if filebits: save_file(filebits, imageinfo['filename'], imagegroup_output_dir)\n\n\ndef gzip_str(string_):\n # taken from https://gist.github.com/Garrett-R/dc6f08fc1eab63f94d2cbb89cb61c33d\n out = io.BytesIO()\n\n with gzip.GzipFile(fileobj=out, mode='w') as fo:\n fo.write(string_.encode())\n\n bytes_obj = out.getvalue()\n return bytes_obj\n\n\ndef apply_ocr_on_folder(images_base_dir, work_local_id, imagegroup, ocr_base_dir):\n \"\"\"\n This function goes through all the images of imagesfolder, passes them to the Google Vision API\n and saves the output files to ocr_base_dir/work_local_id/imagegroup/filename.json.gz\n \"\"\"\n images_dir = images_base_dir/work_local_id/imagegroup\n ocr_output_dir = ocr_base_dir/work_local_id/imagegroup\n ocr_output_dir.mkdir(exist_ok=True, parents=True)\n if not images_dir.is_dir(): return\n for img_fn in images_dir.iterdir():\n result_fn = ocr_output_dir/f'{img_fn.stem}.json.gz'\n if result_fn.is_file(): continue\n try:\n result = get_text_from_image(str(img_fn))\n except:\n logging.error(f'Google OCR issue: {result_fn}')\n continue\n gzip_result = gzip_str(result)\n result_fn.write_bytes(gzip_result)\n\n\ndef get_info_json():\n \"\"\"\n This returns an object that can be serialied as info.json as specified for BDRC s3 storage.\n \"\"\"\n # get current date and time\n now = datetime.now(pytz.utc).isoformat()\n\n info = {\n \"timestamp\": now.split('.')[0],\n 'imagesfolder': IMAGES\n }\n\n return info\n\n\ndef is_archived(key):\n try:\n S3_client.head_object(Bucket=OCR_OUTPUT_BUCKET, Key=key)\n except botocore.errorfactory.ClientError:\n return False\n return True\n\n\ndef archive_on_s3(images_base_dir, ocr_base_dir, work_local_id, imagegroup, s3_paths):\n \"\"\"\n This function uploads the images on s3, according to the schema set up by BDRC, see documentation\n \"\"\"\n # save info json\n info_json = get_info_json()\n s3_ocr_info_path = f'{s3_paths[BATCH_PREFIX]}/{INFO_FN}'\n ocr_output_bucket.put_object(\n Key=s3_ocr_info_path,\n Body=(bytes(json.dumps(info_json).encode('UTF-8')))\n )\n \n # archive images\n images_dir = images_base_dir/work_local_id/imagegroup\n if images_dir.is_dir():\n for img_fn in images_dir.iterdir():\n s3_image_path = f'{s3_paths[IMAGES]}/{img_fn.name}'\n if is_archived(s3_image_path): continue\n ocr_output_bucket.put_object(Key=s3_image_path, Body=img_fn.read_bytes())\n \n # archive ocr output\n ocr_output_dir = ocr_base_dir/work_local_id/imagegroup\n if ocr_output_dir.is_dir():\n for out_fn 
in ocr_output_dir.iterdir():\n s3_output_path = f'{s3_paths[OUTPUT]}/{out_fn.name}'\n if is_archived(s3_output_path): continue\n ocr_output_bucket.put_object(Key=s3_output_path, Body=out_fn.read_bytes())\n\n\ndef clean_up(data_path, work_local_id=None, imagegroup=None):\n \"\"\"\n delete all the images and output of the archived volume (imagegroup)\n \"\"\"\n if imagegroup:\n vol_image_path = data_path/IMAGES/work_local_id/imagegroup\n if vol_image_path.is_dir():\n shutil.rmtree(str(vol_image_path))\n elif work_local_id:\n work_output_path = data_path/OUTPUT/work_local_id\n if work_output_path.is_dir():\n shutil.rmtree(str(work_output_path))\n else:\n for path in data_path.iterdir():\n shutil.rmtree(str(path))\n\n\ndef get_work_local_id(work):\n if ':' in work:\n return work.split(':')[-1], work\n else:\n return work, f'bdr:{work}'\n\n\nclass OPFError(Exception):\n pass\n\ndef process_work(work):\n global last_work, last_vol\n\n if DEBUG['status']: last_work, last_vol = work, 'I1KG3563'\n work_local_id, work = get_work_local_id(work)\n\n is_work_empty = True\n is_start_work = True\n for i, vol_info in enumerate(get_volume_infos(work)):\n if last_work == work_local_id and \\\n len(vol_info['imagegroup']) == len(last_vol) and \\\n vol_info['imagegroup'] < last_vol: continue\n\n is_work_empty = False\n\n # log work info at 1st vol\n if is_start_work and not DEBUG['status']:\n notifier(f'`[Work-{HOSTNAME}]` _Work {work} processing ...._')\n is_start_work = False\n\n if not DEBUG['status']: notifier(f'* `[Volume-{HOSTNAME}]` {vol_info[\"imagegroup\"]} processing ....')\n try:\n # save all the images for a given vol\n save_images_for_vol(\n volume_prefix_url=vol_info['volume_prefix_url'],\n work_local_id=work_local_id,\n imagegroup=vol_info['imagegroup'],\n images_base_dir=IMAGES_BASE_DIR\n )\n\n # apply ocr on the vol images\n apply_ocr_on_folder(\n images_base_dir=IMAGES_BASE_DIR,\n work_local_id=work_local_id,\n imagegroup=vol_info['imagegroup'],\n ocr_base_dir=OCR_BASE_DIR\n )\n\n # get s3 paths to save images and ocr output\n s3_ocr_paths = get_s3_prefix_path(\n work_local_id=work_local_id,\n imagegroup=vol_info['imagegroup'],\n service=SERVICE,\n batch_prefix=BATCH_PREFIX,\n data_types=[IMAGES, OUTPUT]\n )\n\n # save image and ocr output at ocr.bdrc.org bucket\n archive_on_s3(\n images_base_dir=IMAGES_BASE_DIR,\n ocr_base_dir=OCR_BASE_DIR,\n work_local_id=work_local_id,\n imagegroup=vol_info['imagegroup'],\n s3_paths=s3_ocr_paths\n )\n\n # delete the volume\n clean_up(\n DATA_PATH,\n work_local_id=work_local_id,\n imagegroup=vol_info['imagegroup']\n )\n except:\n # create checkpoint\n save_check_point(imagegroup=f\"{work_local_id}-{vol_info['imagegroup']}\")\n raise RuntimeError\n\n if not is_work_empty:\n try: \n catalog.ocr_to_opf(OCR_BASE_DIR/work_local_id)\n clean_up(DATA_PATH, work_local_id=work_local_id)\n clean_up(Path('./output'))\n save_check_point(work=work_local_id)\n except GithubException as ex:\n save_check_point(imagegroup=f\"{work_local_id}-{vol_info['imagegroup']}\")\n raise GithubException(ex.status, ex.data)\n except GeneratorExit:\n save_check_point(imagegroup=f\"{work_local_id}-{vol_info['imagegroup']}\")\n raise OPFError\n else:\n logging.warning(f'Empty work: {work_local_id}')\n\n\ndef get_work_ids(fn):\n for work in fn.read_text().split('\\n'):\n if not work: continue\n yield work.strip()\n\n\ndef load_check_point():\n global last_work, last_vol\n check_point = json.load(CHECK_POINT_FN.open())\n CHECK_POINT[WORK] = check_point[WORK]\n CHECK_POINT[VOL] = 
check_point[VOL]\n\n if CHECK_POINT[VOL]:\n last_work, last_vol = CHECK_POINT[VOL].split('-')\n\n\ndef save_check_point(work=None, imagegroup=None):\n if work and work not in CHECK_POINT[WORK]:\n CHECK_POINT[WORK].append(work)\n if imagegroup:\n CHECK_POINT[VOL] = imagegroup\n json.dump(CHECK_POINT, CHECK_POINT_FN.open('w'))\n\n\ndef show_error(ex, ex_type='ocr'):\n\n error = f\"`Here's the error: {ex}\"\n if ex_type == 'ocr':\n error += f\"\\nTraceback: {traceback.format_exc()}`\"\n slack_notifier(f'`[ERROR] Error occured in {socket.gethostname()}`\\n{error}')\n\n\nif __name__ == \"__main__\":\n input_path = Path('Google-OCR/usage/bdrc/input')\n\n notifier(f'`[OCR-{HOSTNAME}]` *Google OCR is running* ...')\n if CHECK_POINT_FN.is_file():\n load_check_point()\n for workids_path in input_path.iterdir():\n for i, work_id in enumerate(get_work_ids(workids_path)):\n if CHECK_POINT[WORK] and work_id in CHECK_POINT[WORK]: continue\n try:\n process_work(work_id)\n except GithubException as ex:\n show_error(ex, ex_type='github')\n error_work = catalog.batch.pop()\n if catalog.batch: catalog.update_catalog()\n if error_work: delete_repo(error_work[0][1:8])\n slack_notifier(f'`[Restart]` *{HOSTNAME}* ...')\n os.execv(f'{shutil.which(\"nohup\")}',['nohup', 'sh', 'run.sh', '&'])\n except OPFError:\n if catalog.batch: catalog.update_catalog()\n slack_notifier(f'`[Restart]` *{HOSTNAME}* ...')\n os.execv(f'{shutil.which(\"nohup\")}',['nohup', 'sh', 'run.sh', '&'])\n except Exception as ex:\n show_error(ex)\n if catalog.batch: catalog.update_catalog()\n sys.exit()\n\n # update catalog every after 5 pecha\n if len(catalog.batch) == 5:\n catalog.update_catalog()\n\n notifier(f'[INFO] Completed {workids_path.name}')\n\n catalog.update_catalog()\n","repo_name":"JeremiPlazas/Google-OCR","sub_path":"usage/bdrc/bdrc_ocr.py","file_name":"bdrc_ocr.py","file_ext":"py","file_size_in_byte":16348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1644787446","text":"from math import sin\nfrom math import pi\nfrom math import sqrt\n\n\nclass Wave:\n def __init__(self, r, c, frequency=0.05, wavelength=100, amplitude=127):\n self.amplitude = amplitude\n self.r = r\n self.c = c\n self.frequency = frequency\n # self.max_distance = 0\n self.wavelength = wavelength\n self.shift = 0\n self.pi2length = 2 * pi / self.wavelength\n self.freqlength = self.frequency * self.wavelength\n\n # def __repr__(self):\n # return f\"{self.amplitude} * sin(2 * pi / {self.wavelength} * (x + {self.wavelength} * {self.frequency} * t) + {self.shift})\"\n\n def next(self, r, c, time):\n dr = r - self.r\n dc = c - self.c\n return self.amplitude * sin(self.pi2length * (sqrt(dr * dr + dc * dc) + self.freqlength * time) + self.shift)\n\n # def getDist(self, r, c):\n # dr = r - self.r\n # dc = c - self.c\n # return sqrt(dr * dr + dc * dc)\n\n # def isMaxDist(self, r, c):\n # if self.getDist(r, c) == self.max_distance:\n # self.max_distance += 1\n # return True\n # return False\n","repo_name":"SkyPromp/Waves","sub_path":"waves.py","file_name":"waves.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74365330612","text":"import sys\r\ninput = sys.stdin.readline\r\nfrom operator import itemgetter, attrgetter\r\nN = int(input())\r\ntimeList = []\r\nfor n in range(N):\r\n startTime, endTime = map(int, input().split())\r\n timeList.append([startTime, endTime])\r\ntimeList = sorted(timeList, 
key=itemgetter(1,0))\r\nstack = []\r\nstack.append(timeList[0])\r\nfor i in range(1, len(timeList)):\r\n if stack[-1][1]<=timeList[i][0]:\r\n stack.append(timeList[i])\r\nprint(len(stack))","repo_name":"unifolio0/cote_study","sub_path":"백준/Silver/1931. 회의실 배정/회의실 배정.py","file_name":"회의실 배정.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1940563794","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport cv2\nfrom PIL import Image\n\nfrom log.colors import colors, color_id2label\nfrom log.visdom_logger import visdom_logger\n# from sbfml.utils.pano import pano\n\nfrom typing import Union, List, Tuple, Dict\n\ndef plot_loss_val(\n\tlogger\t: visdom_logger, \n\tvalue\t: Union[float, int, torch.Tensor],\n\titer\t: int,\n\tname\t: str, \n\tmode\t: str=\"train\", \n\tlcolor\t: str=\"blue\", \n\tmarkers\t: bool=True, \n\tmarker_symbol\t: str=\"dot\", \n\tmarker_size\t\t: float=5\n) -> None:\n\t'''\n\t@brief Plot loss values as line plots\n\t@param\tlogger\t\t\tThe VisdomLogger instance to use\n\t@param\tloss\t\t\tThe loss value(s) to plot. Must be a dictionary { \"value\": val, \"iter\": iteration }\n\t@param\tname\t\t\tThe loss name(s) for each value\n\t@param\tmode\t\t\tThe experiment mode (\"Train\", \"Evaluation\", \"Test\" or whatever - it used for window naming on Visdom)\n\t@param\tlcolor\t\t\tThe line color for each of the loss values (use Colors dict for ease of use or list of RGB values)\n\t@param\tmarkers\t\t\tIf True (default) plots markers on line start/emd\n\t@param\tmarkerSymbol\tThe marker symbol to use\n\t@param\tmarkerSize\t\tThe size of the marker symbol\n\t'''\n\tif isinstance(value, torch.Tensor):\n\t\tvalue = value.detach().cpu().numpy().item()\n\n\tlcolor = colors[lcolor]\n\tlcolor = (np.expand_dims(np.array(lcolor), axis=0) * 255).astype(np.uint8)\n\tplot_name = f\"{name} {mode}\"\n\t\n\topts = {\n\t\t\"title\"\t\t: plot_name,\n\t\t\"xlabel\"\t: \"Iteration\",\n\t\t\"ylabel\"\t: name,\n\t\t\"linecolor\"\t: lcolor,\n\t\t\"markers\"\t: markers,\n\t\t\"markersymbol\"\t: marker_symbol,\n\t\t\"markersize\"\t: marker_size\n\t}\n\tlogger.instance.line(X=np.array([iter]), Y=np.array([value]), env=logger.env_name, win=plot_name, opts=opts, update=\"append\")\n\n\ndef plot_text(\n\tlogger\t: visdom_logger, \n\ttext\t: str, \n\ttitle\t: str, \n\tmode\t: str=\"Train\"\n) -> None:\n\t'''\n\t@brief Plot Text\n\t@param\tlogger\t\t\tThe VisdomLogger instance to use\n\t@param\ttext\tThe text to plot (can be html)\n\t@param \ttitle\tThe window title\n\t'''\n\tplot_name = f\"{title} {mode}\"\n\tlogger.instance.text(text, env=logger.env_name, win=plot_name)\n\n\ndef plot_img(\n\tlogger\t: visdom_logger, \n\timg\t\t: torch.Tensor, \n\ttitle\t: str, \n\tmode\t: str=\"Train\"\n) -> None:\n\t'''\n\t@brief Plot an Image\n\t@param\tlogger\t\t\tThe VisdomLogger instance to use\n\t@param\timg\t\tThe image to plot (3-channel) (minibatch of torch.Tensor - if the minibatch is larger than 1, then only the first image in the batch will be displayed)\n\t@param\ttitle\tThe title of the window to plot the image\n\t@param\tmode\tThe experiment mode (\"Train\", \"Evaluation\", \"Test\" or whatever - it used for window naming on Visdom)\n\t'''\n\tb, c, h, w = img.size()\n\tif c == 1:\n\t\timg = torch.repeat_interleave(img, 3, dim=1)\n\telif c != 3:\n\t\traise RuntimeError(\"PlotImage() can display only 3-channel images.\")\n\t\n\tdisp = 
img.detach().cpu().numpy()\n\tdisp = disp[0, :, :, :]\n\tplot_name = f\"{title} {mode}\"\n\topts = {\n\t\t\"title\": plot_name\n\t}\n\tlogger.instance.image(disp, env=logger.env_name, opts=opts, win=plot_name)\n\n\ndef plot_img_bbox(\n\tlogger\t: visdom_logger, \n\timg\t\t: torch.Tensor, \n\tbbox\t: torch.Tensor, \n\ttitle\t: str, \n\tmode\t: str=\"Train\"\n) -> None:\n\t'''\n\t@brief Plot an Image with a bounding box overlayed\n\t@param\tlogger\tThe VisdomLogger instance to use\n\t@param\timg\t\tThe image to plot (3-channel) (minibatch of torch.Tensor - if the minibatch is larger than 1, then only the first image in the batch will be displayed)\n\t@param\tbbox\tThe bounding box to plot (can be multiple bounding boxes)\n\t@param\ttitle\tThe title of the window to plot the image\n\t@param\tmode\tThe experiment mode (\"Train\", \"Evaluation\", \"Test\" or whatever - it used for window naming on Visdom)\n\t'''\n\tb, c, h, w = img.size()\n\t# if bbox is in format N x 4 (xmin, ymin, xmax, ymax)\n\tassert c == 3, \"plot_img_bbox() can display only 3-channel images.\"\n\tdisp = img[0, :, :, :]\n\tdisp = np.ascontiguousarray(disp.detach().cpu().numpy().transpose(1, 2, 0) * 255).astype(np.float32)\n\n\tif bbox.ndim == 2:\n\t\tbb, bw = bbox.size()\n\t\tassert bw == 4, \"plot_img_bbox() accepts bbox in format [xmin, ymin, xmax, ymax]\"\n\n\t\tcbbox = bbox[0, :].detach().cpu().numpy()\n\t\tdisp = cv2.rectangle(disp, (cbbox[0], cbbox[1]), (cbbox[2], cbbox[3]), colors[\"lime\"], 2)\n\telif bbox.ndim == 3:\n\t\tbb, bc, bw = bbox.size()\n\t\tassert bw == 4, \"plot_img_bbox() accepts bbox in format [xmin, ymin, xmax, ymax]\"\n\t\t\n\t\tfor bboxidx, color in zip(range(bc), colors):\n\t\t\tcbbox = bbox[0, bboxidx, :].detach().cpu().numpy()\n\t\t\tdisp = cv2.rectangle(disp, (cbbox[0], cbbox[1]), (cbbox[2], cbbox[3]), colors[color], 2)\n\n\t# disp = img[0, :, :, :]\n\t# disp = np.ascontiguousarray(disp.detach().cpu().numpy().transpose(1, 2, 0) * 255).astype(np.float32)\n\tdisp = disp.transpose(2, 0, 1)\n\tplot_name = f\"{title} {mode}\"\n\topts = {\n\t\t\"title\": plot_name\n\t}\n\tlogger.instance.image(disp, env=logger.env_name, opts=opts, win=plot_name)\n\n\ndef plot_detection(\n\tlogger\t\t\t: visdom_logger,\n\timg\t\t\t\t: torch.Tensor,\n\tbbox\t\t\t: torch.Tensor,\n\tclasses\t\t\t: torch.Tensor,\n\tclass_mapping\t: Dict,\n\ttitle\t\t\t: str,\n\tmode\t\t\t: str\n) -> None:\n\t'''\n\t@brief Plot an Image with a bounding box overlayed\n\t@param\tlogger\tThe VisdomLogger instance to use\n\t@param\timg\t\tThe image to plot (3-channel) (minibatch of torch.Tensor - if the minibatch is larger than 1, then only the first image in the batch will be displayed)\n\t@param\tbbox\tThe bounding box to plot (can be multiple bounding boxes)\n\t@param\ttitle\tThe title of the window to plot the image\n\t@param\tmode\tThe experiment mode (\"Train\", \"Evaluation\", \"Test\" or whatever - it used for window naming on Visdom)\n\t'''\n\tb, c, h, w = img.size()\n\t# if bbox is in format N x 4 (xmin, ymin, xmax, ymax)\n\tassert c == 3, \"plot_img_bbox() can display only 3-channel images.\"\n\tdisp = img[0, :, :, :]\n\tdisp = np.ascontiguousarray(disp.detach().cpu().numpy().transpose(1, 2, 0) * 255).astype(np.float32)\n\n\tif bbox.ndim == 2:\n\t\tbb, bw = bbox.size()\n\t\tassert bw == 4, \"plot_img_bbox() accepts bbox in format [xmin, ymin, xmax, ymax]\"\n\n\t\t# cbbox = bbox[0, :].detach().cpu().numpy()\n\t\t# cbbox = bbox.detach().cpu().numpy()\n\t\tfor bboxidx, color in zip(range(bbox.size(0)), colors.keys()):\n\t\t\tcbbox = 
bbox[bboxidx, :].detach().cpu().numpy()\n\t\t\tcolor = (np.asarray(list(colors[color])) * 255).astype(np.uint8)\n\t\t\tcolor = tuple(color.tolist())\n\t\t\tdisp = cv2.rectangle(disp, (cbbox[0], cbbox[1]), (cbbox[2], cbbox[3]), color, 2)\n\t\t\tclassname = classes[bboxidx]\n\t\t\tclassname = class_mapping[classname.item()]\n\t\t\tdisp = cv2.putText(disp, classname, (cbbox[0], cbbox[1]), fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=0.3, color=(255, 255, 255))\n\t\t# disp = cv2.putText(disp, \"Yoooo\", (cbbox[0], cbbox[1]))\n\telif bbox.ndim == 3:\n\t\tbb, bc, bw = bbox.size()\n\t\tassert bw == 4, \"plot_img_bbox() accepts bbox in format [xmin, ymin, xmax, ymax]\"\n\t\t\n\t\tfor bboxidx, color in zip(range(bc), colors.keys()):\n\t\t\tcbbox = bbox[0, bboxidx, :].detach().cpu().numpy()\n\t\t\tcolor = (np.asarray(list(colors[color])) * 255).astype(np.uint8)\n\t\t\tcolor = tuple(color.tolist())\n\t\t\tdisp = cv2.rectangle(disp, (cbbox[0], cbbox[1]), (cbbox[2], cbbox[3]), color, 2)\n\t\t\tclassname = classes[:, bboxidx]\n\t\t\tclassname = class_mapping[classname.item()]\n\t\t\tdisp = cv2.putText(disp, classname, (cbbox[0], cbbox[1]), fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=0.3, color=(255, 255, 255))\n\n\tdisp = disp.transpose(2, 0, 1)\n\tplot_name = f\"{title} {mode}\"\n\topts = {\n\t\t\"title\": plot_name\n\t}\n\tif logger is not None:\n\t\tlogger.instance.image(disp, env=logger.env_name, opts=opts, win=plot_name)\n\tdisp = disp.transpose(1, 2, 0)\n\tdisp = cv2.cvtColor(disp, cv2.COLOR_RGB2BGR)\n\treturn disp\n\t\n\n\ndef plot_img_quad(\n\tlogger\t: visdom_logger, \n\timg\t\t: torch.Tensor, \n\tquad\t: torch.Tensor, \n\ttitle\t: str, \n\tmode\t: str=\"Train\"\n) -> None:\n\t##\n\t# @brief Plot an Image with a bounding box overlayed\n\t# @param\tlogger\tThe VisdomLogger instance to use\n\t# @param\timg\t\tThe image to plot (3-channel) (minibatch of torch.Tensor - if the minibatch is larger than 1, then only the first image in the batch will be displayed)\n\t# @param\tbbox\tThe bounding box to plot (can be multiple bounding boxes)\n\t# @param\ttitle\tThe title of the window to plot the image\n\t# @param\tmode\tThe experiment mode (\"Train\", \"Evaluation\", \"Test\" or whatever - it used for window naming on Visdom)\n\tb, c, h, w = img.size()\n\t# if bbox is in format N x 4 (xmin, ymin, xmax, ymax)\n\tassert c == 3, \"plot_img_quad() can display only 3-channel images.\"\n\tassert quad.ndim == 3, \"plot_img_quad() accepts quads of dimensions N x 2 x 4\"\n\t\n\tqb, qc, qw = quad.size()\n\t\n\tdisp = img[0, :, :, :]\n\tdisp = np.ascontiguousarray(disp.detach().cpu().numpy().transpose(1, 2, 0) * 255).astype(np.float32)\n\t\n\tq = quad[0, :, :].detach().cpu().numpy()\n\tq = q.reshape(qc, qw).astype(np.int)\n\t\n\tdisp = cv2.drawContours(disp, [q], contourIdx=-1, color=colors[\"blue\"], thickness=3)\n\tdisp = disp.transpose(2, 0, 1)\n\t\n\tplot_name = f\"{title} {mode}\"\n\topts = { \"title\": plot_name }\n\tlogger.instance.image(disp, env=logger.env_name, opts=opts, win=plot_name)\n\n\ndef plot_img_points(\n\tlogger\t: visdom_logger, \n\timg\t\t: torch.Tensor, \n\tpoints\t: torch.Tensor, \n\ttitle\t: str, \n\tmode\t: str=\"Train\"\n) -> None:\n\t# TODO: Use list of point tensor for multiple point-set ploting\n\t##\n\t# @brief Plot an Image with a bounding box overlayed\n\t# @param\tlogger\tThe VisdomLogger instance to use\n\t# @param\timg\t\tThe image to plot (3-channel) (minibatch of torch.Tensor - if the minibatch is larger than 1, then only the first image in the batch will be displayed)\n\t# 
@param\tbbox\tThe bounding box to plot (can be multiple bounding boxes)\n\t# @param\ttitle\tThe title of the window to plot the image\n\t# @param\tmode\tThe experiment mode (\"Train\", \"Evaluation\", \"Test\" or whatever - it used for window naming on Visdom)\n\tb, c, h, w = img.size()\n\t# if bbox is in format N x 4 (xmin, ymin, xmax, ymax)\n\tassert c == 3, \"plot_img_points() can display only 3-channel images.\"\n\tdisp = img[0, :, :, :]\n\tdisp = np.ascontiguousarray(disp.detach().cpu().numpy().transpose(1, 2, 0) * 255).astype(np.float32)\n\n\tif isinstance(points, torch.Tensor) or (isinstance(points, List) and len(points) > 1):\n\t\tb, c, w = points.size()\n\t\tpoints = points[0, :, :].detach().cpu().numpy()\n\t\tcolor = np.array(colors[\"lime\"]) * 255\n\t\tfor pointIdx in range(c):\n\t\t\tpoint = points[pointIdx, :]\n\t\t\tdisp = cv2.circle(disp, tuple(point), radius=2, color=color, thickness=-1)\n\n\t\tdisp = disp.transpose(2, 0, 1)\n\t\tplot_name = f\"{title} {mode}\"\n\t\topts = { \"title\": plot_name }\n\t\tlogger.instance.image(disp, env=logger.env_name, opts=opts, win=plot_name)\n\n\ndef plot_compare_img_points(\n\tlogger: visdom_logger, \n\timg: torch.Tensor, \n\tpoints: torch.Tensor, \n\tcompare_points, \n\ttitle, \n\tmode=\"train\"\n) -> None:\n\tb, c, h, w = img.size()\n\t# if bbox is in format N x 4 (xmin, ymin, xmax, ymax)\n\tassert c == 3, \"plot_compare_img_points() can display only 3-channel images.\"\n\tdisp = img[0, :, :, :]\n\tdisp = np.ascontiguousarray(disp.detach().cpu().numpy().transpose(1, 2, 0) * 255).astype(np.float32)\n\n\tb, c, w = points.size()\n\tpoints = points[0, :, :].detach().cpu().numpy()\n\tcomp_points = compare_points[0, :, :].detach().cpu().numpy()\n\tfor point_idx in range(c):\n\t\tpoint = points[point_idx, :]\n\t\tcomp_point = comp_points[point_idx, :]\n\t\tdisp = cv2.circle(disp, tuple(point), radius=3, color=colors[\"lime\"], thickness=-1)\n\t\tdisp = cv2.circle(disp, tuple(comp_point), radius=3, color=colors[\"red\"], thickness=-1)\n\n\tdisp = disp.transpose(2, 0, 1)\n\tplot_name = f\"{title} {mode}\"\n\topts = {\n\t\t\"title\": plot_name\n\t}\n\tlogger.instance.image(disp, env=logger.env_name, opts=opts, win=plot_name)\n\n##\n# @brief Plot images on a grid\n# @param\tlogger\t\t\tThe VisdomLogger instance to use\n# @param\timgs\tThe images to plot (3-channel) (minibatch of torch.Tensor - if minibatch is larger than 1, then batchCount images will be displayed)\n# @params\ttitle\tThe title of the window to plot the images\n# @param\tmode\tThe experiment mode (\"Train\", \"Evaluation\", \"Test\" or whatever - it used for window naming on Visdom)\n# @param\tnrow\tThe number of images in a row\n# @param\tpad\t\tThe padding between the images\ndef plot_images(logger, imgs, title, mode=\"Train\", nrow=4, pad=2):\n\traise NotImplementedError()\n\n\ndef plot_map(\n\tlogger\t: visdom_logger, \n\tmap\t\t: torch.Tensor, \n\ttitle\t: str, \n\tmode\t: str=\"Train\", \n\tcmap\t: str=\"Viridis\"\n) -> None:\n\t##\n\t# @brief Plot a single channel image using a colormap\n\t# @param\tlogger\tThe VisdomLogger instance to use\n\t# @param\tmap\t\tThe map to plot (1-channel) (minibatch of torch.Tensor - if the minibatch size is larger than 1, then the first map in the batch will be displayed)\n\t# @param\ttitle\tThe title of the window to plot the map\n\t# @param\tmode\tThe experiment mode (\"Train\", \"Evaluation\", \"Test\" or whatever - it used for window naming on Visdom)\n\t# @param\tcmap\tThe colormap to use\n\tb, c, h, w = map.size()\n\tif c != 
1:\n\t\traise Warning(\"PlotMap() can display only single channel images.\")\n\tdisp = None\n\tdisp = map[0, 0, :, :]\n\tdisp = torch.flip(disp.detach().cpu(), [0]).numpy()\n\tplot_name = f\"{title} {mode}\"\n\topts = {\n\t\t\"title\": plot_name,\n\t\t\"colormap\": cmap\n\t}\n\tlogger.instance.heatmap(disp, win=plot_name, env=logger.env_name, opts=opts)\n\n\ndef plot_activations(\n\tlogger\t: visdom_logger, \n\tval\t\t: torch.Tensor, \n\ttitle\t: str, \n\tmode\t: str=\"Train\", \n\tcmap\t: str=\"Electric\"\n) -> None:\n\t##\n\t# @brief Plot a tensor that represents a module's activations.\n\t# The input tensor can be of arbitrary size (batch and channel-wise).\n\t# This function plots the channel-wise mean value of the input tensor\n\t# @param\tlogger\t\tThe VisdomLogger instance to use\n\t# @param\tactivations\tThe acitvations tensor to plot\n\t# @param\ttitle\t\tThe plot's title\n\t# @param\tmode\t\tThe experiment mode (\"Train\", \"Validation\", \"Test\" or whatever you may like)\n\t# @param\tcmap\t\tThe colormap to use\n\tb, c, h, w = val.size()\n\tplotName = f\"{title} {mode}\"\n\topts = {\n\t\t\"title\": plotName,\n\t\t\"colormap\": cmap\n\t}\n\t# reverse the order of rows (height) because in visdom's heatmap (0, 0) is the bottom left point of the map\n\tactivations = torch.flip(val, dims=[2])\n\tactivations = activations[0, :, :, :]\n\tnum_elmnts = c * h * w\n\tactivations = torch.sum(activations, dim=0)\n\tactivations = (activations - activations.min()) / (activations.max() - activations.min())\n\tlogger.instance.heatmap(activations, win=plotName, env=logger.env_name, opts=opts)\n\n\ndef plot_aggregated_activations(\n\tlogger\t\t: visdom_logger, \n\tactivations\t: torch.Tensor, \n\tsize\t\t: Union[List[int], Tuple[int, int]], \n\ttitle\t\t: str, \n\tmode\t\t: str=\"Train\", \n\tcmap\t\t: str=\"Electric\"\n) -> None:\n\t##\n\t# @brief Plot an aggregation of activations\n\t# The input is a list/tuple of activation tensors\n\t# @parram\tlogger\t\tThe VisdomLogger instance to use\n\t# @param\tactivations\tThe aggregation of activation maps to plot\n\t# @param\ttitle\t\tThe window title\n\t# @param\tmode\t\tThe experiment mode (\"Train\", \"Test\", \"Validation\" or whatever)\n\t# @param\tcmap\t\tThe color map to use\n\tplotName = f\"{title} {mode}\"\n\topts = {\n\t\t\"title\": plotName,\n\t\t\"colormap\": cmap\n\t}\n\tupsample = nn.UpsamplingBilinear2d(size=size)\n\taggregated = torch.zeros(size, dtype=torch.float32)\n\tfor activation in activations:\n\t\tactivation = activation\n\t\tb, c, h, w = activation.size()\n\t\tactivation = torch.flip(activation, dims=[2])\n\t\tactivation = upsample(activation)\n\t\tactivation = activation[0, :, :, :]\n\t\tactivation = torch.sum(activation, dim=0)\n\t\taggregated += activation.cpu()\n\taggregated = (aggregated - aggregated.min()) / (aggregated.max() - aggregated.min())\n\tlogger.instance.heatmap(aggregated, win=plotName, env=logger.env_name, opts=opts)\n\n\ndef plot_aggregated_activations_over(\n\tlogger\t\t: visdom_logger, \n\timg\t\t\t: torch.Tensor, \n\tactivations\t: torch.Tensor, \n\tsize\t\t: Union[List[int], Tuple[int, int]], \n\ttitle\t\t: str, \n\tmode\t\t: str=\"Train\", \n\tcmap\t\t: str=\"Electric\"\n) -> None:\n\t##\n\t# @brief Plot an aggregation of activations\n\t# The input is a list/tuple of activation tensors\n\t# @parram\tlogger\t\tThe VisdomLogger instance to use\n\t# @param\tactivations\tThe aggregation of activation maps to plot\n\t# @param\ttitle\t\tThe window title\n\t# @param\tmode\t\tThe experiment mode (\"Train\", 
\"Test\", \"Validation\" or whatever)\n\t# @param\tcmap\t\tThe color map to use\n\tplotName = f\"{title} {mode}\"\n\topts = {\n\t\t\"title\": plotName,\n\t\t\"colormap\": cmap\n\t}\n\taggregated = torch.zeros(size, dtype=torch.float32)\n\tfor activation in activations:\n\t\tb, c, h, w = activation.size()\n\t\tactivation = torch.flip(activation, dims=[2])\n\t\tactivation = activation[0, :, :, :]\n\t\tactivation = torch.sum(activation, dim=0)\n\t\tactivation = F.interpolate(activation, size=size, mode=\"linear\")\n\t\taggregated += activation\n\taggregated = (aggregated - aggregated.min()) / (aggregated.max() - aggregated.min())\n\tlogger.instance.heatmap(aggregated, win=plotName, env=logger.env_name, opts=opts)\n\n\ndef plot_img_overlay(\n\tlogger\t: visdom_logger, \n\timage\t: torch.Tensor, \n\toverlay\t: torch.Tensor, \n\tweight\t: float=0.3, \n\ttitle\t: str=None, \n\tmode\t: str=\"Train\"\n) -> None:\n\t##\n\t# @brief plot an image with an overlay.\n\t# @param\tlogger\t`visdom_logger`. A `visdom_logger` instance \n\t# @param\timage\t`torch.tensor`. The image to plot.\n\t# @param\toverlay\t`torch.tensor`. The overlay to plot.\n\t# @param\tweight\t`float`. The transparency weight of the overlay (in [0.0, 1.0]).\n\t# @param\ttitle\t`str`. The title of the plot.\n\t# @param\tmode\t`str`. The experiment mode\n\tplotName = f\"{title} {mode}\"\n\tisOneHot = False\n\tif overlay.ndim == 3:\n\t\toverlay = overlay.unsqueeze(1)\n\t\tisOneHot = True\n\tcb, cc, ch, cw = image.size()\n\tob, oc, oh, ow = overlay.size()\n\n\tfor idx in range(logger.batch_count):\n\t\timg = image[idx, :, :, :].detach().cpu().numpy().transpose(1, 2, 0)\n\t\tover = None\n\t\tif not isOneHot:\n\t\t\tover = torch.argmax(overlay, 1)[idx, :, :].unsqueeze(2).cpu().numpy().astype('uint8')\n\t\telse:\n\t\t\tover = overlay[idx, 0, :, :].unsqueeze(2).cpu().numpy().astype('uint8')\n\t\t\toc = np.unique(over).max()\n\t\tover = np.repeat(over, 3, axis=2)\n\t\tfor label in range(oc):\n\t\t\tcolor = np.array(colors[color_id2label[label+15]])\n\t\t\tidx = over == (label, label, label)\n\t\t\tidx = idx[:, :, 0] & idx[:, :, 1] & idx[:, :, 2]\n\t\t\tover[idx, :] = color\n\t\t\t# over[np.where((over==[label, label, label]).all(axis=2))] = color\n\t\tover = over.astype('float32')\n\t\tover = (over - over.min()) / (over.max() - over.min())\n\t\tres = cv2.addWeighted(over, weight, img, 1.0-weight, 0).transpose(2, 0, 1)\n\t\tplotName += f\" {idx}\"\n\t\topts = {\"title\": plotName}\n\t\tlogger.instance.image(res, win=plotName, env=logger.env_name, opts=opts)\n\n\ndef plot_scatter(\n\tlogger\t: visdom_logger, \n\tval\t\t: torch.Tensor, \n\ttitle\t: str, \n\tmode\t: str=\"Train\"\n) -> None:\n\t##\n\t# @brief Plot a scatter diagram\n\t# @param\tlogger\tThe VisdomLogger instance to use\n\t# @param\tval\t\tThe values to plot (must be 2d)\n\t# @param\ttitle\tThe plot title\n\t# @param\tmode\tThe experiment mode (\"Train\", \"Validation\", \"Test\" or whatever you want)\n\tplotName = f\"{title} {mode}\"\n\topts = {\n\t\t\"title\": f\"{title} {mode}\"\n\t}\n\tlogger.instance.scatter(val.unsqueeze(0), win=plotName, env=logger.env_name, opts=opts)\n\n\n# def plot_pano_lines_from_points(\n# \tlogger\t: visdom_logger, \n# \timg\t\t: torch.Tensor, \n# \tpoints\t: torch.Tensor, \n# \ttitle\t: str, \n# \tmode\t: str=\"train\"\n# ) -> None:\n# \t##\n# \t# @brief\n# \tb, c, h, w = img.size()\n# \tplot_name = f\"{title} {mode}\"\n# \tpano_utils = pano(w, h)\n# \tdisp = img[0, :, :, :].detach().cpu().numpy().transpose(1, 2, 0)\n# \tpoint_disp = points[0, :, 
:].detach().cpu().numpy()\n# \tpoint_all = [point_disp]\n# \tfor i in range(len(point_disp)):\n# \t\tpoint_all.append(point_disp[i, :])\n# \t\tpoint_all.append(point_disp[(i + 2) % len(point_disp), :])\n# \tpoint_all = np.vstack(point_all)\n# \trs, cs = pano_utils.line_idx_from_coords(point_all)\n# \trs = np.array(rs)\n# \tcs = np.array(cs)\n\n# \tpano_edge_c = disp.astype(np.uint8)\n# \tfor dx, dy in [[-1, 0], [1, 0], [0, 0], [0, 1], [0, -1]]:\n# \t\tpano_edge_c[np.clip(rs + dx, 0, h - 1), np.clip(cs + dy, 0, w - 1), 0] = 0\n# \t\tpano_edge_c[np.clip(rs + dx, 0, h - 1), np.clip(cs + dy, 0, w - 1), 1] = 0\n# \t\tpano_edge_c[np.clip(rs + dx, 0, h - 1), np.clip(cs + dy, 0, w - 1), 2] = 255\n# \topts = { \"title\": title }\n# \tpano_edge_c = (pano_edge_c - pano_edge_c.min()) / (pano_edge_c.max() - pano_edge_c.min())\n# \tpano_edge_c = pano_edge_c.transpose(2, 0, 1)\n# \tlogger.instance.image(pano_edge_c, win=plot_name, env=logger.env_name, opts=opts)\n\n\ndef plot_semantic_map(\n\tlogger\t: visdom_logger, \n\tmap\t\t: torch.Tensor, \n\ttitle\t: str, \n\tmode\t: str=\"train\"\n) -> None:\n\t##\n\t# @brief\n\tif isinstance(map, list):\n\t\tif len(map[0].size()) == 3:\n\t\t\tmap = map[0].unsqueeze(0) \n\t\telse:\n\t\t\tmap = map[0].permute(1, 0, 2, 3)\n\t# else:\n\t# \tmap = map.permute(1, 0, 2, 3)\n\tb, c, h, w = map.size()\n\t\n\t\t# raise RuntimeError(f\"The input segmenation map should have 1 channel.\")\n\tplot_name = f\"{title} {mode}\"\n\tclass_ids = map.detach().cpu().numpy()[0, :, :, :]\n\t\n\tif class_ids.shape[0] != 1:\n\t\tclass_ids = np.argmax(class_ids, axis=0)\n\t\tclass_ids = np.expand_dims(class_ids, 0)\n\n\tclasses_un = np.unique(class_ids)\n\tdisp = np.zeros([h, w, 3])\n\tfor class_id in classes_un:\n\t\t# + 15 to get a \"better\" set of colors from our palette starting from 'black\n\t\tcolor = np.array(colors[color_id2label[class_id + 15]])\n\t\tidx = class_id == class_ids\n\t\tidx = np.squeeze(idx, axis=0)\n\t\tdisp[idx, :] = color\n\t\t\t\n\t# disp = (disp - disp.min()) / (disp.max() - disp.min())\n\tdisp = disp.transpose(2, 0, 1) * 255\n\tdisp = disp.astype(np.uint8)\n\topts = { \"title\": title }\n\n\tif logger is not None:\n\t\tlogger.instance.image(disp, win=plot_name, env=logger.env_name, opts=opts)\n\tdisp = disp.transpose(1, 2, 0)\n\tdisp = cv2.cvtColor(disp, cv2.COLOR_RGB2BGR)\n\treturn disp\n\ndef plot_normals_map(\n\tlogger\t: visdom_logger, \n\tnormals\t: torch.Tensor, \n\ttitle\t: str, \n\tmode\t: str=\"train\", \n\tscale_fn=None\n) -> None:\n\n\tb, c, h, w = normals.size()\n\tplot_name = f\"{title} {mode}\"\n\tdisp = normals.detach().cpu().numpy()[0, :, :, :]\n\tdisp = 128 * (disp + 1)\n\tdisp = disp.astype(np.uint8)\n\tdisp = (disp - disp.min()) / (disp.max() - disp.min())\n\topts = { \"title\": title }\n\tlogger.instance.image(disp, win=plot_name, env=logger.env_name, opts=opts)\n\ndef plot_shape_map(\n\tlogger: visdom_logger,\n\tshape_map: torch.Tensor,\n\ttitle: str,\n\tmode: str='train'\n) -> None:\n\t'''\n\t'''\n\tb, c, h, w = shape_map.size()\n\tplot_name = f\"{title} {mode}\"\n\tdisp = shape_map.detach().cpu().numpy()[0, :, :, :]\n\t# shapes are in [-1, 1] so we trasform them in order to lie in [0, 1]\n\tdisp = (disp - disp.min()) / (disp.max() - disp.min())\n\topts = { 'title': title }\n\tlogger.instance.image(disp, win=plot_name, env=logger.env_name, opts=opts)\n\ndef plot_normals_map_no_scale(\n\tlogger: visdom_logger,\n\tnormals_map: torch.Tensor,\n\ttitle: str,\n\tmode: str='train'\n) -> None:\n\tplot_shape_map(logger, normals_map, title, 
mode)","repo_name":"stavros-p/image_inpainting_detection","sub_path":"log/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":22485,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"32685993941","text":"from PIL import Image, ImageDraw\n\nimage = Image.new(\"RGBA\", (960, 540), 'white') \n\ndraw = ImageDraw.Draw(image)\n\nwith open('C:\\compGraphics\\compGraphLab2\\DS4.txt', 'r') as file: # в першому аргументі потрібно змінити шлях, на відповідний шлях до файлу DS4.txt\n\n for line in file:\n coordinatesTuple = (int(line[0:3]), int(line[4:7]))\n draw.point(coordinatesTuple, (0, 0, 0))\n\ndel draw\nimage.save(\"C:\\compGraphics\\compGraphLab2\\img.png\", \"PNG\") # в першому аргументі потрібно змінити/поставити шлях, по якому ви хочете зберегти зображення і відповідно назву файлу\n \n\n\n\n\n\n","repo_name":"LazebnyA/compGraphLab2","sub_path":"lab2.py","file_name":"lab2.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35805974382","text":"#Cat Jones\r\n#class for King piece\r\n\r\nclass King:\r\n #params: color - string; row - int; col - int\r\n #initialize an object of class King\r\n #returns: king object\r\n def __init__(self, color, row, col):\r\n self.type = \"king\"\r\n self.color = color\r\n self.row = row\r\n self.col = col\r\n\r\n #params: new_row - int; new_col - int; board - 2D list\r\n #check whether or not a given move is valid, considering the piece's current position as the origin point\r\n #returns: valid- boolean\r\n def validate_move(self, new_row, new_col, board):\r\n valid = True \r\n if self.row == new_row and self.col == new_col:\r\n valid = False\r\n\r\n elif abs(new_col - self.col) > 1 or abs(new_row - self.row) > 1:\r\n valid = False\r\n #difference one or zero (can't both be zero)\r\n \r\n elif self.check(new_row, new_col, board):\r\n valid = False\r\n\r\n #Is target position occupied by same-color piece\r\n if board[new_row][new_col] != 0 and board[new_row][new_col].get_color() == self.color:\r\n valid = False\r\n\r\n return valid\r\n\r\n #params: new_row - int; new_col - int; board - 2D list\r\n #update the object values for row and column, and \"move\" piece from former position to new position in board and change has_moved to True\r\n #returns: none \r\n def update(self, new_row, new_col, board):\r\n board[self.row][self.col] = 0\r\n board[new_row][new_col] = self\r\n\r\n #update ninth row of chess board\r\n if self.color() == \"white\":\r\n board[9][2] = new_row\r\n board[9][3] = new_col\r\n else:\r\n board[9][0] = new_row\r\n board[9][1] = new_col\r\n\r\n self.row = new_row\r\n self.col = new_col\r\n \r\n #params: none\r\n #return the current value of an object's color variable\r\n #returns: self.color - string\r\n def get_color(self):\r\n return self.color\r\n \r\n #params: none\r\n #returns the current value of an object's type variable\r\n #returns: self.type - string\r\n def get_type(self):\r\n return self.type\r\n\r\n def get_col(self):\r\n return self.col\r\n\r\n def get_row(self):\r\n return self.row\r\n \r\n #params: none\r\n #string conversion of the object, represent color and type\r\n #returns string\r\n def __str__(self):\r\n color = self.color[0]\r\n piece_type = \"K\"\r\n return color + piece_type\r\n \r\n #Skye Smith\r\n #params: row - int; col - int; board - 2D-list\r\n #determines whether or not a king of this object's color would be in check at the given position 
on the given board\r\n    #returns: check - boolean\r\n    def check(self, row, col, board):\r\n        check = False\r\n        \r\n        #row and column checks (Rook and Queen)\r\n        #check for vertical sightline below king\r\n        for i in range(row + 1, 8):\r\n            piece = board[i][col]\r\n            if piece != 0:\r\n                if piece.get_color() != self.color and (piece.get_type() == \"rook\" or piece.get_type() == \"queen\"):\r\n                    check = True\r\n                #end loop on first sighted piece, piece of same color/wrong type will still obscure line of attack\r\n                break\r\n        \r\n        #check for vertical sightline above king\r\n        if not(check):\r\n            for i in range(row - 1, -1, -1):\r\n                piece = board[i][col]\r\n                if piece != 0:\r\n                    if piece.get_color() != self.color and (piece.get_type() == \"rook\" or piece.get_type() == \"queen\"):\r\n                        check = True\r\n                    #end loop on first sighted piece, piece of same color/wrong type will still obscure line of attack\r\n                    break\r\n\r\n        #check horizontal sightline to the right\r\n        if not(check):\r\n            for i in range(col + 1, 8):\r\n                piece = board[row][i]\r\n                if piece != 0:\r\n                    if piece.get_color() != self.color and (piece.get_type() == \"rook\" or piece.get_type() == \"queen\"):\r\n                        check = True\r\n                    #end loop on first sighted piece, piece of same color/wrong type will still obscure line of attack\r\n                    break\r\n\r\n        #check for horizontal sightline to the left\r\n        if not(check):\r\n            for i in range(col - 1, -1, -1):\r\n                piece = board[row][i]\r\n                if piece != 0:\r\n                    if piece.get_color() != self.color and (piece.get_type() == \"rook\" or piece.get_type() == \"queen\"):\r\n                        check = True\r\n                    #end loop on first sighted piece, piece of same color/wrong type will still obscure line of attack\r\n                    break\r\n        \r\n        #diagonal line checks (Bishop and Queen)\r\n        for row_add in [-1, 1]:\r\n            #skips rest of the diagonal checks if king is already in check\r\n            if check:\r\n                break\r\n            for col_add in [-1, 1]:\r\n                #skips second cycle of loop if king is in check\r\n                if check:\r\n                    break\r\n                temp_row = row + row_add\r\n                temp_col = col + col_add\r\n                while temp_row <= 7 and temp_row >= 0 and temp_col <= 7 and temp_col >= 0:\r\n                    piece = board[temp_row][temp_col]\r\n                    if piece != 0:\r\n                        if piece.get_color() != self.color and (piece.get_type() == \"bishop\" or piece.get_type() == \"queen\"):\r\n                            check = True\r\n                        #if there is a piece, it will either place king in check, or obscure the line of attack from any pieces beyond it\r\n                        break\r\n                    \r\n                    temp_row += row_add\r\n                    temp_col += col_add\r\n        \r\n        #L checks (Knight)\r\n        if not(check):\r\n            for d_row in [-2, -1, 1, 2]:\r\n                for d_col in [-2, -1, 1, 2]:\r\n                    knight_row = row + d_row\r\n                    knight_col = col + d_col\r\n                    #a knight move shifts row and column by different absolute amounts (one by 1, the other by 2)\r\n                    if abs(d_col) != abs(d_row) and not(knight_row > 7 or knight_row < 0 or knight_col > 7 or knight_col < 0):\r\n                        piece = board[row + d_row][col + d_col]\r\n                        if piece != 0 and piece.get_type() == \"knight\" and piece.get_color() != self.color:\r\n                            check = True\r\n                            #break only if put in check, knights jump and line of attack can't be obscured\r\n                            break\r\n        \r\n        #pawn checks\r\n        #initialize add, which direction does a pawn need to be in to threaten the king\r\n        if not(check):\r\n            add = 0\r\n            if self.color == \"white\":\r\n                add = -1\r\n            else:\r\n                add = 1\r\n            for i in [-1, 1]:\r\n                #skip squares that fall off the board\r\n                if not (0 <= row + add <= 7 and 0 <= col + i <= 7):\r\n                    continue\r\n                piece = board[row + add][col + i]\r\n                if piece != 0:\r\n                    if piece.get_type() == \"pawn\" and piece.get_color() != self.color:\r\n                        check = True\r\n                        #saves checking second space if first space is aggressing pawn, but doesn't skip second space if not\r\n                        break\r\n        \r\n        return 
check","repo_name":"LumiaBlue/ChessProject","sub_path":"pieces/king.py","file_name":"king.py","file_ext":"py","file_size_in_byte":7281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7733136990","text":"\"\"\"Small script to extract the LTSpice Data (logs) for usage with Origin Pro\"\"\"\n\ndef main(case: int = 0):\n \"\"\"Main method\n\n @param case: case = 1 => add steps for core (x) in output\n \"\"\"\n\n # Input files\n inp_files = [\"Reihenschwing\", \"Parallelschwing\"]\n\n for file in inp_files:\n input_file = open(file+\".log\",\"r\")\n lines = input_file.readlines()\n\n # Needed variables for extraction\n transformed = []\n data = []\n index = -1\n meas = False\n\n # First two hits have to be excluded\n curr_step = -1\n\n # Total number of lines with data\n max_steps = 65\n\n if (case == 1):\n data.append([])\n for i in range(1,max_steps+1):\n step_x = (i-1)/2\n step_x = str(step_x).replace(\".\",\",\")\n data[0].append(step_x)\n\n index += 1\n\n for line in lines:\n # Indicator where the extraction should start\n if line.__contains__(\"Measurement\"):\n meas = True\n data.append([])\n index += 1\n curr_step = -1\n elif (curr_step == max_steps+1):\n meas = False\n\n if (meas):\n if curr_step > 0:\n # Extract the .meas data\n # Attention: Origin uses the format 1.000,00 for imports\n # (or my settings are incorrect)\n dat = line.split('\\t')[1].replace(\".\",\",\")\n\n data[index].append(dat)\n\n curr_step += 1\n\n # Reformat data list\n # data = [[1, 2, 3, 4, ...], [a, b, c, d, ...]]\n # output = [[1, a], [2, b], [3, c], ...]\n output = []\n for jj in range(len(data[0])):\n rearrange = \"\"\n for ii in range(len(data)):\n # Tab seperated\n rearrange += data[ii][jj] + \"\\t\"\n output.append(rearrange[:-1]+\"\\n\")\n print(output)\n\n # Write data to file \n with open(file+\"_extracted.dat\", \"w+\") as output_file:\n output_file.writelines(output)\n \n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Tim-orius/Physik-Anfaengerpraktikum2","sub_path":"Versuch9_10/python_scripts/data_extraction.py","file_name":"data_extraction.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74133481651","text":"# Question Difficulty: Easy\n# Question Number: 145\n# Question URL: https://leetcode.com/problems/binary-tree-postorder-traversal/\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def postorderTraversal(self, root: Optional[TreeNode]) -> List[int]:\n if root is None:\n return []\n \n stack = []\n result = []\n\n while root or stack:\n while root:\n stack.append(root)\n root = root.left \n temp = stack[-1].right\n if temp:\n root = temp\n else:\n temp = stack.pop()\n result.append(temp.val)\n while stack and temp == stack[-1].right:\n temp = stack.pop()\n result.append(temp.val)\n return result ","repo_name":"imaaduddin/LeetCode-Solutions-Continued","sub_path":"Binary-Tree-Postorder-Traversal.py","file_name":"Binary-Tree-Postorder-Traversal.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32971688257","text":"import bottle\nimport os\n\napp = bottle.Bottle()\n\nangle = 90\nos.system(\"sudo python servo.py 90\")\n\n@app.route('/')\ndef root():\n\t\"Root\"\n\treturn 
bottle.template('main.tpl',angle=angle)\n\n@app.route('/turn/')\ndef turn(newangle=80):\n\t\"Turn the servo to the specified angle\"\n\tglobal angle\n\tangle = newangle\n\tos.system(\"sudo python servo.py %i\" % int(angle))\n\treturn \"Turned to %i\" % int(angle)\n\n@app.route('/drive')\ndef drive():\n\t\"Turn camera and snap\"\n\tglobal angle\n\tangle = int(bottle.request.query.angle)\n\tos.system(\"sudo python servo.py %i\" % angle)\n\tos.system(\"raspistill -vf -hf -o static/snap.jpg\")\n\tbottle.redirect('/')\n\n@app.route('/snap')\ndef snap():\n\t\"Snap a picture\"\n\tos.system('raspistill -vf -hf -o static/snap.jpg')\n\treturn bottle.static_file('snap.jpg',root='static')\n\n@app.route('/static/')\ndef static(filename):\n\t\"Return a static file\"\n\treturn bottle.static_file(filename,root='static')\n\nif __name__ == \"__main__\":\n\tbottle.run(app,host='0.0.0.0')\n\n","repo_name":"dmollaaliod/raspicamserver","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35731848832","text":"from aiogram import types\nfrom asyncio import sleep\nfrom typing import Union\nimport NekoGram\n\n\nasync def default_start_function(message: Union[types.Message, types.CallbackQuery]):\n neko: NekoGram.Neko = message.conf['neko']\n if not await neko.storage.check_user_exists(user_id=message.from_user.id):\n lang = message.from_user.language_code if message.from_user.language_code in neko.texts.keys() \\\n else neko.storage.default_language\n await neko.storage.create_user(user_id=message.from_user.id, language=lang)\n await sleep(0.1) # Sleep a bit to make sure user is added to the database\n else:\n # Completely erase user data\n await neko.storage.set_user_data(user_id=message.from_user.id)\n\n data = await neko.build_text(text='start', user=message.from_user)\n if isinstance(message, types.Message):\n await data.data.send_message(user_id=message.from_user.id, neko=neko)\n await message.delete()\n else:\n await data.data.edit_text(message=message.message)\n","repo_name":"lyteloli/CoinTracker","sub_path":"NekoGram/handlers/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33606684097","text":"from dependencies import spark_pg_utils\n\n\ndef solution_1(spark):\n\n import pyspark.sql.functions as F\n from pyspark.sql.window import Window\n\n seat_df = spark.read_table_as_df(\"seat_626\")\n seat_df.show()\n\n result_df = seat_df.alias('s1') \\\n .withColumn('order', F.when(F.col('id') % 2 == 0, F.col('id') - 1).otherwise(F.col('id') + 1)) \\\n .withColumn(\"id\", F.row_number().over(Window.orderBy(\"order\")))\\\n .orderBy('id') \\\n .select([F.col('id'), F.col('student')])\n\n result_df.show()\n\n\nif __name__ == '__main__':\n spark_pg_utils.execute(solution_1)\n","repo_name":"sp496/leetcode-pyspark","sub_path":"dataframe_solutions/Medium/626.py","file_name":"626.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31386277576","text":"import sqlite3\nfrom employee import ESX\nconn = sqlite3.connect('X:\\Python\\src\\SQL\\sqlLite\\SL01\\emp.db')\nc = conn.cursor()\nemp1=ESX('jane','van',606060)\nemp2=ESX('hank ','pym',1260)\nwhile True:\n x=input('Enter Choice 1.Create Table 2.Add employee 3.show employee--> ')\n if x=='1':\n try:\n 
c.execute(\"\"\"CREATE TABLE employee(\n first text,\n last text,\n pay integer\n )\"\"\")\n except Exception as e:\n print(e)\n elif x=='2':\n c.execute(\"INSERT INTO employee VALUES(?,?,?)\",(emp1.first,emp1.last,emp1.pay))\n # c.execute(\"INSERT INTO employee VALUES(:first,:last,:pay)\",{'first':emp1.first,'last':emp1.last,'pay':emp1.pay})\n # c.execute(\"INSERT INTO employee VALUES('{}','{}',{})\".format(emp1.first,emp1.last,emp1.pay))\n conn.commit()\n print('inserted data into table')\n elif x=='3':\n c.execute(\"SELECT * FROM employee WHERE last='van'\")\n conn.commit()\n print(c.fetchall())\n \n elif x=='4':\n break\n \nconn.commit()\n\nconn.close()\n","repo_name":"parishkar-9790/Python","sub_path":"src/SQL/sqlLite/SL01/sql.py","file_name":"sql.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11824188605","text":"'''\r\nРазработать формат представления многогранника и процедуру его каркасной отрисовки\r\nв ортографической и изометрической проекциях. Обеспечить удаление невидимых линий\r\nи возможность пространственных поворотов и масштабирования многогранника.\r\nОбеспечить автоматическое центрирование и изменение размеров изображения при изменении\r\nразмеров окна.\r\nВариант 2: Правильный октаэдр\r\n\r\n'''\r\nimport math\r\n\r\nfrom matplotlib import pyplot as plt\r\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection\r\nimport numpy as np\r\nfrom matplotlib.widgets import Button\r\nfrom matplotlib.widgets import RadioButtons\r\nimport tkinter as tk\r\n\r\nwin = tk.Tk()\r\nwin.geometry(\"400x400\")\r\n\r\na = tk.Entry(win, width=30)\r\ntk.Label(text=\"Длина ребра = \") \\\r\n .grid(row=0, column=0)\r\na.grid(row=0, column=1)\r\n\r\n\r\ndef main():\r\n b = int(a.get())\r\n fig = plt.figure('лабораторная работа № 2, Борисов Я.А. 
Вариант 2: Правильный октаэдр')\r\n ax = fig.add_subplot(111, projection='3d')\r\n\r\n # вершины пирамиды\r\n v = np.array([[0, 0, b / math.sqrt(2)], [0, 0, -b / math.sqrt(2)], [0, b / math.sqrt(2), 0],\r\n [0, -b / math.sqrt(2), 0], [b / math.sqrt(2), 0, 0], [-b / math.sqrt(2), 0, 0]])\r\n\r\n ax.scatter3D(v[:, 0], v[:, 1], v[:, 2])\r\n\r\n # построение сторон\r\n verts = [[v[0], v[4], v[2]],\r\n [v[0], v[5], v[3]],\r\n [v[1], v[4], v[2]],\r\n [v[1], v[5], v[3]],\r\n [v[0], v[5], v[3]],\r\n [v[0], v[5], v[2]],\r\n [v[0], v[4], v[3]],\r\n [v[1], v[5], v[2]],\r\n [v[1], v[4], v[3]],\r\n ]\r\n\r\n # отрисовка\r\n ax.add_collection3d(\r\n Poly3DCollection(verts, facecolors='pink', linewidths=1, edgecolors='purple', alpha=0.25)) # 0.25\r\n\r\n def iButton(event):\r\n ax.view_init(28, -136)\r\n plt.draw()\r\n\r\n axes_ibutton_add = plt.axes([0.55, 0.05, 0.4, 0.075])\r\n ibutton_add = Button(axes_ibutton_add, 'Изометрическая')\r\n ibutton_add.on_clicked(iButton)\r\n\r\n def oButton(event):\r\n ax.view_init(-2, -36)\r\n plt.draw()\r\n\r\n axes_obutton_add = plt.axes([0.06, 0.05, 0.4, 0.075])\r\n obutton_add = Button(axes_obutton_add, 'Ортографическая')\r\n obutton_add.on_clicked(oButton)\r\n\r\n lines_visibility = plt.axes([0.02, 0.85, 0.5, 0.11], facecolor='lavenderblush')\r\n radio = RadioButtons(lines_visibility, ('Каркасная отрисовка', 'Убрать невидимые линии'))\r\n\r\n def lines(a):\r\n condition = {'Каркасная отрисовка': 0.20, 'Убрать невидимые линии': 1}\r\n alpha = condition[a]\r\n # print(a)\r\n ax.add_collection3d(Poly3DCollection(verts, facecolors='pink', linewidths=1, edgecolors='purple', alpha=alpha))\r\n plt.draw()\r\n\r\n radio.on_clicked(lines)\r\n\r\n plt.show()\r\n\r\n\r\nbutton = tk.Button(win, text=\"Показать фигуру\", command=main)\r\nbutton.grid(row=1, column=1)\r\nwin.mainloop()\r\n","repo_name":"Yannikupy/ComputerGraphics","sub_path":"lab2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3401,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37218453121","text":"class Solution:\n def removeInterval(self, intervals: 'List[List[int]]', toBeRemoved: 'List[int]') -> 'List[List[int]]': # O( N | N )\n output = [] \n for s, e in intervals:\n if s >= toBeRemoved[1] or e <= toBeRemoved[0]: # no overlapping\n output.append([s,e])\n elif toBeRemoved[0] <= s < toBeRemoved[1] and e > toBeRemoved[1]: # s in between, e after tBR[1]\n output.append([toBeRemoved[1], e])\n elif s < toBeRemoved[0] < e and toBeRemoved[1] >= e: # tBR[0] in between, and tBT[1] after e\n output.append([s, toBeRemoved[0]])\n elif s < toBeRemoved[0] < toBeRemoved[1] <= e: # tBR fully overlapped under s, e \n output.append([s, toBeRemoved[0]])\n output.append([toBeRemoved[1], e])\n else: # the entire interval needs to be removed\n continue \n return output\n \n\n\n\n# previous solution\n\n# class Solution:\n# def removeInterval(self, intervals: 'List[List[int]]', toBeRemoved: 'List[int]') -> 'List[List[int]]':\n# output = []\n# for start, end in intervals:\n# if end <= toBeRemoved[0] or start >= toBeRemoved[1]:\n# output.append([start, end])\n# else:\n# if toBeRemoved[0] <= start <= toBeRemoved[1] and toBeRemoved[0] <= end <= toBeRemoved[1]:\n# continue\n\n# elif start <= toBeRemoved[0] and end >= toBeRemoved[1]:\n# if start != toBeRemoved[0]:\n# output.append([start, toBeRemoved[0]])\n# if end != toBeRemoved[1]:\n# output.append([toBeRemoved[1], end])\n\n# elif toBeRemoved[0] <= start <= toBeRemoved[1]:\n# s = toBeRemoved[1]\n# if s != 
end:\n# output.append([s, end])\n\n# elif toBeRemoved[0] <= end <= toBeRemoved[1]:\n# e = toBeRemoved[0]\n# if start != e:\n# output.append([start, e])\n# return output\n\n\n\n\n#previous approach\n# class Solution:\n# def removeInterval(self, intervals: 'List[List[int]]', toBeRemoved: 'List[int]') -> 'List[List[int]]':\n# stk = []\n# r = toBeRemoved\n# for i in intervals:\n# if i[0] <= r[0] <= i[1] and i[0] <= r[1] <= i[1]: # the entire removal is within the current range\n# stk.append([i[0], r[0]])\n# stk.append([r[1], i[1]])\n#\n# elif r[0] <= i[0] <= r[1] and r[0] <= i[1] <= r[1]: # current i need to be removed\n# continue\n#\n# elif i[0] <= r[0] <= i[1]: # the removal start point is within the curret range\n# stk.append([i[0], r[0]])\n#\n# elif i[0] <= r[1] <= i[1]: # the removal end point is within the curret range\n# stk.append([r[1], i[1]])\n#\n# else: # current I is outside the removal range\n# stk.append(i)\n#\n# output = []\n# for s in stk:\n# if s[0] == s[1]:\n# continue\n# else:\n# output.append(s)\n# return output\n","repo_name":"renjieliu/leetcode","sub_path":"1001_1499/1272.py","file_name":"1272.py","file_ext":"py","file_size_in_byte":3234,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"25927702584","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Setup script for anyblok_product\"\"\"\n\nfrom setuptools import setup, find_packages\nimport os\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(here, 'README.rst'),\n 'r', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\nwith open(os.path.join(here, 'CHANGELOG.rst'),\n 'r', encoding='utf-8') as changelog_file:\n changelog = changelog_file.read()\n\nwith open(os.path.join(here, 'VERSION'),\n 'r', encoding='utf-8') as version_file:\n version = version_file.read().strip()\n\nrequirements = [\n 'anyblok',\n 'anyblok_postgres',\n 'anyblok-marshmallow',\n]\n\ntest_requirements = [\n \"pytest\"\n]\n\nbloks = [\n 'product_item=anyblok_product.bloks.product_item:ProductItemBlok',\n (\n 'product_template=anyblok_product.bloks.product_template:'\n 'ProductTemplateBlok'\n ),\n 'product_family=anyblok_product.bloks.product_family:ProductFamilyBlok',\n]\n\ntest_bloks = [\n (\n 'test_family_blok=anyblok_product.test_bloks.test_family_blok:'\n 'TestFamilyBlok'\n ),\n]\n\nsetup(\n name='anyblok_product',\n version=version,\n description=\"Anyblok blok for Product management.\",\n long_description=readme + '\\n\\n' + changelog,\n author=\"Franck Bret\",\n author_email='franckbret@gmail.com',\n url='https://github.com/AnyBlok/anyblok_product',\n packages=find_packages(),\n entry_points={\n 'bloks': bloks,\n 'test_bloks': test_bloks,\n },\n include_package_data=True,\n install_requires=requirements,\n zip_safe=False,\n keywords='anyblok, product, family, template, item',\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n test_suite='tests',\n tests_require=test_requirements,\n)\n","repo_name":"AnyBlok/anyblok_product","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32034293191","text":"from tornado import testing\nfrom 
tornado.httpclient import HTTPRequest\nfrom server import WebSocketServer\nfrom tornado import testing, httpserver, gen, websocket\nfrom helpers import push_event\nfrom authorization import AbstractAuthorization\n\n\nclass WebSocketSimpleTest(testing.AsyncHTTPTestCase):\n\n def get_app(self):\n app = WebSocketServer.make_app()\n return app\n\n def test_404(self):\n response = self.fetch('/')\n self.assertEqual(response.code, 404)\n\n def test_400(self):\n response = self.fetch('/connect')\n self.assertEqual(response.code, 400)\n\n\nclass WebSocketBetterTest(testing.AsyncTestCase):\n\n def setUp(self):\n super(WebSocketBetterTest, self).setUp()\n server = httpserver.HTTPServer(WebSocketServer.make_app())\n socket, self.port = testing.bind_unused_port()\n server.add_socket(socket)\n\n def _mk_connection(self):\n return websocket.websocket_connect(\n 'ws://localhost:{}/connect?sub=test_channel'.format(self.port)\n )\n\n @gen.coroutine\n def _mk_client(self):\n c = yield self._mk_connection()\n _ = yield c.read_message()\n\n raise gen.Return(c)\n\n @testing.gen_test\n def test_hello(self):\n c = yield self._mk_connection()\n response = yield c.read_message()\n self.assertEqual('hello', response)\n\n @testing.gen_test\n def test_channels(self):\n c = yield self._mk_client()\n yield c.read_message()\n self.assertEqual(push_event(\"foo\", (\"test_channel\",)), 1)\n response = yield c.read_message()\n self.assertEqual('\"foo\"', response)\n\n\nclass WebSocketDumbAuthTest(testing.AsyncTestCase):\n\n def setUp(self):\n super(WebSocketDumbAuthTest, self).setUp()\n server = httpserver.HTTPServer(WebSocketServer.make_app('authorization.Dumb'))\n socket, self.port = testing.bind_unused_port()\n server.add_socket(socket)\n\n def _mk_connection(self):\n return websocket.websocket_connect(\n 'ws://localhost:{}/connect?sub=test_channel'.format(self.port)\n )\n\n @testing.gen_test\n def test_dumb(self):\n c = yield self._mk_connection()\n response = yield c.read_message()\n self.assertEqual('hello', response)\n\n\nclass WebSocketHeaderAuthTest(testing.AsyncTestCase):\n\n def setUp(self):\n class HeaderAuth(AbstractAuthorization):\n def authorize(self, handler):\n if 'Authorization' in handler.request.headers:\n return handler.request.headers['Authorization'] == 'Test'\n return False\n\n super(WebSocketHeaderAuthTest, self).setUp()\n server = httpserver.HTTPServer(WebSocketServer.make_app(HeaderAuth()))\n socket, self.port = testing.bind_unused_port()\n server.add_socket(socket)\n\n def _mk_connection(self):\n request = HTTPRequest(\n 'ws://localhost:{}/connect?sub=test_channel'.format(self.port),\n headers={\n 'Authorization': 'Test'\n }\n )\n return websocket.websocket_connect(request)\n\n @testing.gen_test\n def test_authorized(self):\n c = yield self._mk_connection()\n response = yield c.read_message()\n self.assertEqual('hello', response)\n\n\nclass WebSocketRedisPort(testing.AsyncTestCase):\n\n def setUp(self):\n class HeaderAuth(AbstractAuthorization):\n def authorize(self, handler):\n if 'Authorization' in handler.request.headers:\n return handler.request.headers['Authorization'] == 'Test'\n return False\n\n super(WebSocketRedisPort, self).setUp()\n server = httpserver.HTTPServer(WebSocketServer.make_app(HeaderAuth(), redis_port_local=7800))\n socket, self.port = testing.bind_unused_port()\n server.add_socket(socket)\n\n def _mk_connection(self):\n request = HTTPRequest(\n 'ws://localhost:{}/connect?sub=test_channel'.format(self.port),\n headers={\n 'Authorization': 'Test'\n }\n )\n return 
websocket.websocket_connect(request)\n\n @testing.gen_test\n def test_authorized(self):\n c = yield self._mk_connection()\n response = yield c.read_message()\n self.assertEqual('hello', response)\n\n","repo_name":"Armida220/UAVManager","sub_path":"venv/Lib/site-packages/tornado_websocket_server/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6309839247","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\n\ndef calc_Er_mom_bal(n, e_z, dp_dr, v_tor, v_pol, B_tor, B_pol):\n pres_term = -1.0 / (n * e_z) * dp_dr\n # pres_term_simp = -1.0 * (Lp * T.J) / e_z # alternative method using grad scale length.\n vxb_term = -1.0 * (v_pol * B_tor - v_tor * B_pol)\n E_r = pres_term + vxb_term\n # E_r_simp = pres_term_simp + vxb_term\n return E_r, pres_term, vxb_term","repo_name":"gt-frc/gt3","sub_path":"GT3/RadialTransport/Functions/CalcErMomBal.py","file_name":"CalcErMomBal.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"12554090513","text":"import pygame\nimport random\nfrom pygame.sprite import Sprite\n\nclass Cloud(Sprite):\n def __init__(self, dino_settings, screen):\n super(Cloud, self).__init__()\n im = pygame.image.load('images/cloud.png')\n self.image = pygame.transform.scale(im, (60, 30))\n self.dino_settings = dino_settings\n self.screen = screen\n self.rect = self.image.get_rect()\n heights = [30, 45, 50]\n self.rect.x = self.rect.width * 8\n self.rect.y = heights[random.randrange(0,3)]\n self.x = float(self.rect.x)\n\n self.speed_factor = dino_settings.cloud_speed\n\n def blitme(self):\n self.screen.blit(self.image, self.rect)\n\n def check_edges(self):\n if self.rect.right <= 0:\n return True\n\n def update(self):\n self.x -= self.speed_factor\n self.rect.x = self.x\n","repo_name":"clairedeng/T_Rex-Dino-game","sub_path":"cloud.py","file_name":"cloud.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42819638373","text":"from cmdstanpy.stanfit import CmdStanMCMC, CmdStanVB, CmdStanMLE\r\nfrom utils import get_empirical_score, get_synthetic_score, bayesian_p\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef get_posterior_predictions(model, quantity):\r\n \"\"\"\r\n Function to compute binomial cross entropy\r\n :param model: Stan model object\r\n :param quantity: Name of generated quantity\r\n :return: Maximum a posteriori probability estimates\r\n \"\"\"\r\n if isinstance(model, CmdStanMCMC):\r\n df = model.draws_pd()\r\n y_pred = df[\r\n [col for col in df if col.startswith(quantity)]\r\n ].mean(axis=0)\r\n elif isinstance(model, CmdStanVB):\r\n df = model.variational_sample\r\n df.columns = model.column_names\r\n y_pred = df[\r\n [col for col in df if col.startswith(quantity)]\r\n ].mean(axis=0)\r\n elif isinstance(model, CmdStanMLE):\r\n df = model.optimized_params_pd\r\n y_pred = df[\r\n [col for col in df if col.startswith(quantity)]\r\n ].mean(axis=0)\r\n else:\r\n raise TypeError(\"CmdStan model must be MCMC, VB, or MLE\")\r\n return y_pred\r\n\r\n\r\ndef get_binary_cross_entropy(y, y_pred, eta=0.01):\r\n \"\"\"\r\n Function to compute binomial cross entropy\r\n :param y: True labels\r\n :param y_pred: Predicted probabilities\r\n :param eta: Clipping threshold\r\n :return: Binary cross 
entropy\r\n \"\"\"\r\n y = np.array(y)\r\n y_pred = np.array(y_pred)\r\n y_pred = np.clip(y_pred, eta, 1 - eta)\r\n binomial_deviance = - np.mean(\r\n y * np.log(y_pred) + (1 - y) * np.log(1 - y_pred)\r\n )\r\n return binomial_deviance\r\n\r\n\r\ndef get_misclassification_error(y, y_pred):\r\n \"\"\"\r\n Function to compute binomial cross entropy\r\n :param y: True labels\r\n :param y_pred: Predicted probabilities\r\n :return: Misclassification error (i.e. 1 - accuracy)\r\n \"\"\"\r\n y = np.array(y)\r\n y_pred = np.array(y_pred)\r\n y_pred = np.where(y_pred >= 0.5, 1, 0)\r\n misclassification_error = np.mean(y != y_pred)\r\n return misclassification_error\r\n\r\n\r\ndef evaluate_models(\r\n y, model_MCMC, model_VB, model_MLE, model_glicko\r\n):\r\n \"\"\"\r\n Function to evaluate model performance on test data\r\n :param y: True labels\r\n :param model_MCMC: MCMC model (sample)\r\n :param model_VB: Variational Bayes model (variational)\r\n :param model_MLE: MLE model (optimize)\r\n :param glicko: Original Glicko model\r\n :return: Data frame of evaluation metrics\r\n \"\"\"\r\n # Get model predictions on test sample\r\n y_pred_MCMC = get_posterior_predictions(\r\n model=model_MCMC,\r\n quantity=\"score_ppd\"\r\n )\r\n y_pred_VB = get_posterior_predictions(\r\n model=model_VB,\r\n quantity=\"score_ppd\"\r\n )\r\n y_pred_MLE = get_posterior_predictions(\r\n model=model_MLE,\r\n quantity=\"score_ppd\"\r\n )\r\n y_pred_glicko = model_glicko\r\n # Compute binary cross entropy loss for each model\r\n bce_MCMC = get_binary_cross_entropy(y=y, y_pred=y_pred_MCMC)\r\n bce_VB = get_binary_cross_entropy(y=y, y_pred=y_pred_VB)\r\n bce_MLE = get_binary_cross_entropy(y=y, y_pred=y_pred_MLE)\r\n bce_glicko = get_binary_cross_entropy(y=y, y_pred=y_pred_glicko)\r\n # Compute missclassification error for each model\r\n mce_MCMC = get_misclassification_error(y=y, y_pred=y_pred_MCMC)\r\n mce_VB = get_misclassification_error(y=y, y_pred=y_pred_VB)\r\n mce_MLE = get_misclassification_error(y=y, y_pred=y_pred_MLE)\r\n mce_glicko = get_misclassification_error(y=y, y_pred=y_pred_glicko)\r\n # Combine results\r\n df = pd.DataFrame(\r\n data={\r\n \" \":\r\n [\r\n \"MCMC (HMC)\",\r\n \"MAP (L-BFGS)\",\r\n \"VB (Meanfield)\",\r\n \"Glicko2 (Original)\"\r\n ],\r\n \"binary cross entropy loss $$- \\\\dfrac{1}{n} \\\\sum y \\\\times \\\r\n log(y_{pred}) + (1-y) \\\\times log(1 - y_{pred}) $$\":\r\n [bce_MCMC, bce_MLE, bce_VB, bce_glicko],\r\n \"misclassification error $$1 - \\\\dfrac{1}{n} \\\\sum \\\\text{I} \\\r\n \\\\{y = y_{pred}\\\\} $$\": [mce_MCMC, mce_MLE, mce_VB, mce_glicko]\r\n }\r\n ).round(decimals=3).set_index(\" \")\r\n df = df.style.set_caption(\"$$\\\\textbf{Testing Performance}$$\")\r\n return df\r\n\r\n\r\ndef plot_ppc(score_ppc, observed_data, check, dim):\r\n \"\"\"\r\n Function to plot and Bayesian p-values\r\n :param score_ppc: Synthetic scores\r\n :param observed_data: True data\r\n :param check: PP-Checks to apply\r\n :param dim: Marginalization dim (period or player)\r\n returns None\r\n \"\"\"\r\n observed_data_ = dict()\r\n\r\n for (key, value) in observed_data.items():\r\n\r\n if 'test' not in key:\r\n observed_data_[key] = value\r\n\r\n observed_data = pd.DataFrame.from_dict(\r\n observed_data_\r\n )\r\n\r\n fig_size = 5\r\n\r\n fig, axes = plt.subplots(1, len(check))\r\n\r\n fig.set_size_inches(len(check) * fig_size, fig_size)\r\n\r\n bins = [33] * len(check)\r\n\r\n p_values = bayesian_p(score_ppc, observed_data, check, dim)\r\n\r\n for i in range(len(check)):\r\n main_title = 'stats = {} 
(p-value : {:.2f})'.format(\r\n check[i], p_values[check[i]]\r\n )\r\n\r\n empirical_score = get_empirical_score(\r\n observed_data, check[i].lower(), dim\r\n )\r\n\r\n sim_dist = get_synthetic_score(\r\n score_ppc, observed_data, check[i].lower(), dim\r\n )\r\n\r\n axes[i].hist(sim_dist, bins=bins[i])\r\n\r\n axes[i].axvline(x=empirical_score, color='r')\r\n\r\n axes[i].title.set_text(main_title)\r\n\r\n axes[i].set_ylabel(\"{}\".format('Number of wins'))\r\n\r\n fig.suptitle('Posterior Predictive Checks w.r.t. Period')\r\n\r\n plt.show()\r\n\r\n return None\r\n\r\n\r\ndef plot_trace(samples,\r\n gammas=[(2, 2), (7, 8), (3, 6), (4, 9)]\r\n ):\r\n \"\"\"\r\n Function to plot trace figures\r\n :param samples: Latent r.v. samples\r\n :param gammas: A list of 4 latent params to plot\r\n returns None\r\n \"\"\"\r\n index = {0: (0, 0), 1: (0, 1), 2: (1, 0), 3: (1, 1)}\r\n\r\n fig, axes = plt.subplots(2, 2)\r\n fig.set_size_inches(14, 7)\r\n\r\n for i in range(len(gammas)):\r\n g1, g2 = gammas[i][0], gammas[i][1]\r\n draws = np.asarray(samples.posterior['gamma'])[:, :, g1, g2]\r\n iteration = list(range(draws.shape[1]))\r\n x = index[i][0]\r\n y = index[i][1]\r\n chains = dict()\r\n\r\n for j in range(draws.shape[0]):\r\n chains[j] = draws[j, :]\r\n\r\n for key in chains.keys():\r\n axes[x][y].plot(\r\n iteration,\r\n chains[key],\r\n label='Chain {}'.format(key),\r\n alpha=0.7\r\n )\r\n\r\n axes[x][y].title.set_text('gamma[{},{}]'.format(g1, g2))\r\n\r\n axes[x][y].legend(loc='upper right')\r\n\r\n return None\r\n\r\n\r\ndef plot_elbo(glicko_vi):\r\n \"\"\"\r\n Function to plot elbo figure\r\n :param glicko_vi: Trained model\r\n returns None\r\n \"\"\"\r\n for fname in glicko_vi.runset._stdout_files:\r\n with open(fname, \"r\") as f:\r\n text = f.read()\r\n\r\n text = text.split('\\n')\r\n idx = text.index('Begin stochastic gradient ascent.')\r\n\r\n elbos = []\r\n deltas = []\r\n iterations = []\r\n\r\n for i in range(idx + 2, len(text) - 4):\r\n cache = [x for x in text[i].split(\" \") if x != \"\"]\r\n iterations.append(float(cache[0]))\r\n elbos.append(float(cache[1]))\r\n deltas.append(float(cache[2]))\r\n\r\n fig, ax1 = plt.subplots()\r\n fig.set_size_inches(7, 4)\r\n\r\n color = 'tab:red'\r\n\r\n ax1.set_xlabel('Iterations')\r\n ax1.set_ylabel('ELBO', color=color)\r\n\r\n ax1.plot(iterations, elbos, color=color)\r\n ax1.tick_params(axis='y', labelcolor=color)\r\n\r\n ax2 = ax1.twinx()\r\n color = 'tab:blue'\r\n ax2.set_ylabel('Delta', color=color)\r\n ax2.plot(iterations, deltas, color=color)\r\n ax2.tick_params(axis='y', labelcolor=color)\r\n fig.tight_layout()\r\n plt.title('Convergence of ELBO')\r\n plt.show()\r\n\r\n return None\r\n\r\n\r\ndef plot_loglikelihood(glicko_map):\r\n \"\"\"\r\n Function to plot ll figure\r\n :param glicko_map: Trained model\r\n returns None\r\n \"\"\"\r\n for fname in glicko_map.runset._stdout_files:\r\n with open(fname, \"r\") as f:\r\n text = f.read()\r\n\r\n with open('./criticism/map_split.txt') as f:\r\n split = str(f.read())\r\n\r\n len_splitted = len(text.split(split))\r\n splitted = text.split(split)\r\n iterations = []\r\n loglikelihoods = []\r\n deltas = []\r\n for i in range(1, len_splitted):\r\n cache = [x for x in splitted[i].strip().split(' ') if x != '']\r\n\r\n iterations.append(float(cache[0]))\r\n loglikelihoods.append(float(cache[1]))\r\n deltas.append(float(cache[2]))\r\n\r\n fig, ax1 = plt.subplots()\r\n fig.set_size_inches(7, 4)\r\n\r\n color = 'tab:red'\r\n\r\n ax1.set_xlabel('Iterations')\r\n ax1.set_ylabel('LogLikelihood', 
color=color)\r\n\r\n    ax1.plot(iterations, loglikelihoods, color=color)\r\n    ax1.tick_params(axis='y', labelcolor=color)\r\n\r\n    ax2 = ax1.twinx()\r\n    color = 'tab:blue'\r\n    ax2.set_ylabel('Delta', color=color)\r\n    ax2.plot(iterations, deltas, color=color)\r\n    ax2.tick_params(axis='y', labelcolor=color)\r\n    fig.tight_layout()\r\n    plt.title('Convergence of LogLikelihood')\r\n    plt.show()\r\n\r\n    return None\r\n\r\n\r\ndef plot_bce(iteration, bce, delta):\r\n    \"\"\"\r\n    Function to plot bce figure\r\n    :param iteration: Iteration num\r\n    :param bce: BCE val\r\n    :param delta: Delta val\r\n    \"\"\"\r\n    fig, ax1 = plt.subplots()\r\n    fig.set_size_inches(7, 4)\r\n\r\n    color = 'tab:red'\r\n\r\n    ax1.set_xlabel('Iterations')\r\n    ax1.set_ylabel('BCE', color=color)\r\n\r\n    ax1.plot(iteration, bce, color=color)\r\n    ax1.tick_params(axis='y', labelcolor=color)\r\n\r\n    ax2 = ax1.twinx()\r\n    color = 'tab:blue'\r\n    ax2.set_ylabel('Delta', color=color)\r\n    ax2.plot(iteration, delta, color=color)\r\n    ax2.tick_params(axis='y', labelcolor=color)\r\n    fig.tight_layout()\r\n    plt.title('Convergence of BCE')\r\n    plt.show()\r\n\r\n    return None\r\n","repo_name":"ketencimert/probprog-finalproject","sub_path":"criticism/criticism.py","file_name":"criticism.py","file_ext":"py","file_size_in_byte":10180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37872631355","text":"from navigation.models import Table, Column, ColumnType\r\n\r\n\r\ndef destroy():\r\n    columns = Column.objects.all()\r\n    columns.delete()\r\n    return \"DELETED\"\r\n\r\n\r\ndef create():\r\n    column_types = ColumnType.objects.filter(category__common=True).all()\r\n    tables = Table.objects.all()\r\n    for table in tables.all():\r\n        for columnType in column_types.all():\r\n            column = Column.objects.create(table=table,\r\n                                           columnType=columnType,\r\n                                           number=columnType.number,\r\n                                           active=columnType.active,\r\n                                           editable=columnType.editable)\r\n            column.save()\r\n    return \"CREATED\"\r\n","repo_name":"AlexanderAvramchuk/edem","sub_path":"navigation/create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"5448388266","text":"#read the input number\na = input('Your four-digit natural number:')\n\n#1 the product of the digits of this number\n#I split the entered number into 4 separate numbers and convert them into a numeric format\none = int(a[0])\ntwo = int(a[1])\nthree = int(a[2])\nfour = int(a[3])\n\n#I calculate the product of the four obtained numbers\nproduct = one*two*three*four\n\nprint(f'The product of the digits of {a}:',product)\n\n#write the number in reverse order\nb = a[::-1]\nprint(f'Number {a} in reverse order:', b)\n\n#in ascending order sort the numbers included in the given number\nprint(f'Number {a} in ascending order:', sorted(a))","repo_name":"kolyasalubov/UA-12-10-23.PythonFundamentals","sub_path":"VHanna/HW3/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"15164961562","text":"# Write a python script to add two numbers 25 (in octal) and 39 (in hexadecimal) and\r\n# display the result in binary format.\r\n\r\ntest_string1 = input(\"Enter octal Number\")\r\nprint(\"The original string : \" +str(test_string1))\r\nres = int(test_string1, 8)\r\n\r\n\r\ntest_string2=input(\"Enter a hexadecimal number \")\r\nprint(\"The original string : \" 
+str(test_string2))\r\nres1=int(test_string2,16)\r\n\r\n\r\nres_bin=bin(res+res1)\r\n\r\nprint(\"Binary number string : \" + str(res_bin))\r\n","repo_name":"rohitgupta9/Full_Stack_-Python-_with-Django","sub_path":"Assignment 3/Solution_10.py","file_name":"Solution_10.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22940024583","text":"'''\n@File: queryDataSampleRankListingApi.py\n@time:2021/9/1\n@Author:quanliu\n@Desc: Data collection - query API service class (shared by all amazon/smt/1688/ebay/shopee pages)\n'''\nfrom apps.AllSystemData.DasSystem.das_api.publicCommonJudgeEmptySevice import PublicCommonJudgeEmptySevice\nfrom apps.AllSystemData.DasSystem.das_api.publicCommonParamService import PublicCommonParamServiceClass\nfrom apps.AllSystemData.DasSystem.das_api.publicCommonUrlSevice import PublicCommonUrlServiceClass\nfrom apps.Common_Config.operate_api_data import api_assemble_new\nfrom flask import current_app as app\nfrom apps.Common_Config.parseRequestDatas import parseRequestDatas\nimport json\n\n\n@api_assemble_new()\ndef dataSampleRankListingFunction(platform,searchType,kwargs):\n    app.logger.info(\"dataSampleRankListingFunction------------------->start\")\n    # determine which pages' data require an empty-check on the input parameters\n    isNeedEmpty = PublicCommonJudgeEmptySevice().needJudgeEmpty(platform, searchType,kwargs)\n    if isNeedEmpty:\n        country = parseRequestDatas(\"country\", kwargs)  # empty-check the country/site\n        if country == \"\" or searchType == \"\":\n            app.logger.error(\"dataSampleRankListingFunction--------->InputParam:country or searchType is null\")\n            return \"Request parameter country or searchType is empty\"\n    # get the request parameters\n    rankListing03,rankListing02,rankListing01 = PublicCommonParamServiceClass().getApiInputParam(platform,searchType)\n    url = PublicCommonUrlServiceClass().getApiUrl(platform,searchType)  # get the request URL\n    keyList = []\n    if kwargs != \"\":\n        for key in kwargs.keys():\n            keyList.append(key)\n        for i in range(len(keyList)):\n            value = parseRequestDatas(keyList[i],kwargs)\n            rankListing03[keyList[i]] = value\n    # replace the middle layer\n    rankListing02[\"search\"] = rankListing03\n    # replace the outermost parameters\n    rankListing01[\"args\"] = json.dumps(rankListing02)\n    return url,rankListing01\n","repo_name":"lhh5036/interface","sub_path":"apps/AllSystemData/DasSystem/das_api/platform_dataSample/queryDataSampleRankListingApi.py","file_name":"queryDataSampleRankListingApi.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3214106548","text":"\"\"\" Module to grab CHL data\"\"\"\nimport subprocess\nimport os\nimport numpy as np\n\nfrom IPython import embed\n\ndef grab_cmems(clobber=False):\n\n    for year in np.arange(1993, 2021, dtype=int): \n        for month in np.arange(1, 13, dtype=int): \n            outfile = f'chl_{year}-{month}.nc'\n            out_path = os.path.join(os.getenv('CMEMS'), 'CHL') \n            if os.path.isfile(os.path.join(out_path, outfile)) and not clobber:\n                print(f\"{outfile} exists. 
Skipping\")\n continue\n # \n print(f\"Downloading {outfile}\")\n #\n if month < 12:\n date_max = f'{year}-{month+1}-1 00:00:00' \n else:\n date_max = f'{year}-{month}-31 13:00:00' \n command = [\n 'motuclient',\n '--motu', \n 'https://my.cmems-du.eu/motu-web/Motu',\n '--service-id', \n 'GLOBAL_MULTIYEAR_BGC_001_029-TDS',\n '--product-id', \n 'cmems_mod_glo_bgc_my_0.25_P1D-m', \n '--longitude-min', '-180', \n '--longitude-max', '179.75', \n '--latitude-min','-90', \n '--latitude-max', '90', \n '--date-min', f'{year}-{month}-1 00:00:00',\n '--date-max', date_max,\n '--depth-min', '0.5057', \n '--depth-max', '0.5058',\n '--variable', 'chl',\n '--out-dir', out_path,\n '--out-name', outfile,\n '--user', 'ssakrison', \n '--pwd', '2Magoosak2',\n ]\n pw = subprocess.Popen(command)\n pw.wait()\n\nif __name__ == '__main__':\n grab_cmems()","repo_name":"profxj/mhw_analysis","sub_path":"mhw_analysis/chl/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"10137346181","text":"\"\"\"\nThis is the most basic implementation of MarketUp formulation.\n\nThis version uses CPLEX as a solver.\n\nCreated by Rohit Karvekar (Aug, 22) for Mip Wise.\n\"\"\"\n\nfrom docplex.mp.model import Model\n\n# Input Data\n# marketing channels\nmc = {1: 'Print', 2: 'TV', 3: 'SEO', 4: 'Social Media'}\nI = list(mc)\n# expected ROI\nr = {1: 1.16, 2: 1.09, 3: 1.06, 4: 1.14}\n# expected market penetration\np = {1: 2.1, 2: 2.5, 3: 3.0, 4: 0.9}\n# total budget\ntb = 1_000_000\n# print budget\npb = 100_000\n# viewer target\nvt = 1_500_000\n# minimum conventional channel allocation\nca = 0.4\n\n# Define the model\nmdl = Model('market_up')\n\n# Add variables\nx = mdl.var_dict(keys=I, vartype=mdl.continuous_vartype, name='x')\n\n# Add Constraints\n# C1) Can't exceed the total budget\nmdl.add_constraint(mdl.sum(x) <= tb, ctname='C1')\n# C2) Minimum allocation to conventional channels\nmdl.add_constraint(sum(x[i] for i in [1, 2]) >= ca * tb, ctname='C2')\n# C3) Can't exceed the print budget\nmdl.add_constraint(x[1] <= pb, ctname='C3')\n# C4) Social Media investment must be at most three times SEO investment\nmdl.add_constraint(x[4] <= 3 * x[3], ctname='C4')\n# C5) Reach minimum viewers target\nmdl.add_constraint(sum(p[i] * x[i] for i in I) >= vt, ctname='C5')\n\n# Set the objective function\ntotal_roi = sum(r[i] * x[i] for i in I)\nmdl.maximize(total_roi)\n\n# Optimize\nmdl.solve()\n\n# Retrieve the solution\nx_sol = {mc[i]: int(x[i].solution_value) for i in I}\nprint(f'Total ROI: {total_roi.solution_value}')\nprint(f'Optimal budget allocation: {x_sol}')\n\n","repo_name":"mipwise/use-cases","sub_path":"marketup/scripts/marketup_cplex.py","file_name":"marketup_cplex.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"7488855484","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@author: donald\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport zipfile\r\nimport urllib.request\r\nimport os\r\n\r\npd.set_option('display.max_columns', None)\r\npd.set_option('display.max_rows', None)\r\n\r\n\r\nos.chdir(\"C:\\\\Users\\\\donald\\\\Desktop\\\\Python\\\\callreport\")\r\n\r\n# Download the ZIP file from the NCUA website - change quarter to yyyy-03 or 06 or 09 or 12\r\nquarter = '2022-12' #YYYY-MM representing the quarter - MM is 03, 06, 09, or 12\r\nurl = 
'https://ncua.gov/files/publications/analysis/call-report-data-'+quarter+'.zip'\r\nfilename = quarter+'.zip'\r\nurllib.request.urlretrieve(url, filename)\r\n\r\n# Extract the contents of the ZIP file\r\nwith zipfile.ZipFile(filename, 'r') as zip_ref:\r\n zip_ref.extractall()\r\n\r\n# Read the CSV files into dataframes\r\ndf_Acctdesc = pd.read_csv('AcctDesc.txt')\r\ndf_branches = pd.read_csv('Credit Union Branch Information.txt')\r\ndf_fs220 = pd.read_csv('FS220.txt', usecols=['CU_NUMBER', 'CYCLE_DATE', 'JOIN_NUMBER', 'ACCT_010', 'ACCT_013', 'ACCT_025A', 'ACCT_031B', 'ACCT_041B', 'ACCT_083', 'ACCT_300', 'ACCT_340', 'ACCT_380', 'ACCT_457', 'ACCT_523', 'ACCT_524', 'ACCT_550', 'ACCT_602', 'ACCT_657', 'ACCT_671', 'ACCT_860C', 'ACCT_968', 'ACCT_025B'])\r\ndf_fs220a = pd.read_csv('FS220A.txt', usecols=['CU_NUMBER', 'CYCLE_DATE', 'JOIN_NUMBER', 'Acct_010A', 'ACCT_115', 'ACCT_117', 'ACCT_119', 'ACCT_131', 'ACCT_210', 'ACCT_230', 'ACCT_250', 'ACCT_260', 'ACCT_270', 'ACCT_280', 'ACCT_290', 'ACCT_310', 'ACCT_320', 'ACCT_350', 'ACCT_360', 'ACCT_370', 'ACCT_381', 'ACCT_385', 'ACCT_396', 'ACCT_397', 'ACCT_452', 'ACCT_453', 'ACCT_454', 'ACCT_455', 'ACCT_458', 'ACCT_460', 'ACCT_617A', 'ACCT_618A', 'Acct_661A', 'Acct_730A', 'Acct_730B', 'Acct_997', 'Acct_998'])\r\ndf_fs220b = pd.read_csv('FS220B.txt', usecols=['CU_NUMBER', 'CYCLE_DATE', 'JOIN_NUMBER', 'ACCT_065A4', 'ACCT_067A2', 'ACCT_068A', 'ACCT_069A', 'ACCT_966'])\r\ndf_fs220c = pd.read_csv('FS220C.txt', usecols=['CU_NUMBER', 'CYCLE_DATE', 'JOIN_NUMBER', 'ACCT_690', 'ACCT_691', 'ACCT_730B1', 'ACCT_730B2'])\r\ndf_fs220d = pd.read_csv('FS220D.txt', usecols=['CU_NUMBER', 'CYCLE_DATE', 'JOIN_NUMBER', 'Acct_700'])\r\ndf_fs220g = pd.read_csv('FS220G.txt', usecols=['CU_NUMBER', 'CYCLE_DATE', 'JOIN_NUMBER','ACCT_658A', 'ACCT_691L', 'ACCT_851', 'ACCT_852', 'ACCT_853'])\r\ndf_fs220h = pd.read_csv('FS220H.txt', usecols=['CU_NUMBER', 'CYCLE_DATE', 'JOIN_NUMBER','Acct_397A'])\r\ndf_fs220l = pd.read_csv('FS220L.txt', usecols=['CU_NUMBER', 'CYCLE_DATE', 'JOIN_NUMBER','ACCT_386A', 'ACCT_386B'])\r\ndf_fs220n = pd.read_csv('FS220N.txt', usecols=['CU_Number', 'CYCLE_DATE', 'JOIN_NUMBER','ACCT_AS0003', 'ACCT_AS0004', 'ACCT_AS0005', 'ACCT_AS0010'])\r\ndf_fs220n = df_fs220n.rename(columns={'CU_Number':'CU_NUMBER'})\r\ndf_fs220p = pd.read_csv('FS220P.txt', usecols=['CU_NUMBER', 'Cycle_date','join_number','ACCT_AS0007', 'ACCT_AS0008', 'ACCT_AS0009', 'ACCT_AS0013', 'ACCT_AS0016', 'ACCT_AS0017', 'ACCT_AS0036', 'ACCT_DL0002', 'ACCT_DL0030', 'ACCT_DL0037'])\r\ndf_fs220p = df_fs220p.rename(columns={'Cycle_date':'CYCLE_DATE', 'join_number':'JOIN_NUMBER'})\r\ndf_FOICU = pd.read_csv('FOICU.txt', usecols=['CU_NUMBER','CYCLE_DATE','JOIN_NUMBER','CU_NAME', 'CITY','STATE'])\r\n\r\ndfs = [df_FOICU, df_fs220, df_fs220a, df_fs220b, df_fs220c, df_fs220d, df_fs220g, df_fs220h, df_fs220l, df_fs220n, df_fs220p]\r\n\r\n\r\n# join the dataframes on cu_number, cycle_date, and join_number\r\nmerged_df = pd.merge(dfs[0], dfs[1], on=['CU_NUMBER', 'CYCLE_DATE', 'JOIN_NUMBER'], how='outer')\r\nfor i in range(2, len(dfs)):\r\n merged_df = pd.merge(merged_df, dfs[i], on=['CU_NUMBER', 'CYCLE_DATE', 'JOIN_NUMBER'], how='outer')\r\n\r\n\r\nmerged_df.to_excel(quarter + '.xlsx')","repo_name":"donaldmears/NCUA_5300_Call_Report_Data","sub_path":"callreportdata.py","file_name":"callreportdata.py","file_ext":"py","file_size_in_byte":3611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} 
+{"seq_id":"1147723861","text":"#https://school.programmers.co.kr/learn/courses/30/lessons/136798\n'''\nnumber : 기사 단원의 수\nlimit : 공격력 제한수치\npower : 제한수치 초과 공격력\n\nresult : 무기를 위한 철의 무게\n'''\n\ndef all_divider(value):\n answer = []\n for i in range(1, value+1):\n answer.append(get_divider(i))\n return answer\n\ndef get_divider(value):\n answer = []\n for i in range(1, int(value**(1/2))+1):\n if value % i == 0:\n answer.append(i)\n print(answer, int(value**(1/2))+1)\n if i ** 2 != value:\n answer.append(value//i)\n print(\"제곱\" ,answer, value, i ** 2)\n # print(answer)\n return len(answer)\n \n\ndef solution(number, limit, power):\n result = 0\n for i in all_divider(number):\n if i > limit: result += power\n else: result += i\n return result\n\n \n \n\nget_divider(25)\n\n\n# solution(5,3,2)\n# print(solution(10,3,2))","repo_name":"julia0926/TIL_Algo","sub_path":"Study/Programmers_lv1/136798_기사단원의무기.py","file_name":"136798_기사단원의무기.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73374731252","text":"from datetime import datetime\r\nfrom keras.layers import Dense, LSTM\r\nfrom keras.models import Sequential, model_from_json\r\nfrom math import sqrt\r\nfrom matplotlib import pyplot\r\nfrom pandas import concat\r\nfrom sklearn.metrics import mean_squared_error\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\n \r\nALL_COLUMNS = ['Date','Time','TimeID','DayType','LotID','CityID','LotName','FreeParking','TotalJamINRadius_1','TotalJamOUTRadius_1','TotalJamINRadius_2','TotalJamOUTRadius_2','TotalJamINRadius_3','TotalJamOUTRadius_3','AvgINForRadius_1','AvgOUTForRadius_1','AvgINForRadius_2','AvgOUTForRadius_2','AvgINForRadius_3','AvgOUTForRadius_3'];\r\nPROCESS_COLUMNS = ['Date', 'TimeID','DayType', 'FreeParking','TotalJamINRadius_1','TotalJamOUTRadius_1','TotalJamINRadius_2','TotalJamOUTRadius_2','TotalJamINRadius_3','TotalJamOUTRadius_3'];\r\nREINDEX_COLUMNS = ['FreeParking', 'DayType', 'TotalJamINRadius_1','TotalJamOUTRadius_1','TotalJamINRadius_2','TotalJamOUTRadius_2','TotalJamINRadius_3','TotalJamOUTRadius_3'];\r\n \r\n\r\n\r\n#number of previous time ID's. 15 Time ID's is equal to 1 hour \r\nnr_time_ids = 15;\r\nn_features = 8;\r\n# fix random seed for reproducibility\r\nnp.random.seed(7)\r\n\r\n#config\r\ncsv_file_name = \"DataCSV/5_G22-Seattle.csv\";\r\nmodel_name = \"Seattle_LSTM\";\r\noptimizer = \"adam\";\r\nloss = \"mse\";\r\nmetrics = ['mae'];\r\nepochs = 50;\r\nbatch_size = 5;\r\npercentage_training = 0.8;\r\nmodel_root_folder = \"NeuralNetworks/\";\r\n\r\n \r\ndef parse(x):\r\n return datetime.strptime(x, '%d/%m/%Y %H:%M')\r\n \r\n# Convert series to supervised learning\r\ndef series_to_supervised(data, n_in=1, n_out=1, dropnan=True):\r\n n_vars = 1 if type(data) is list else data.shape[1]\r\n df = pd.DataFrame(data)\r\n cols, names = list(), list()\r\n # input sequence (t-n, ... t-1)\r\n for i in range(n_in, 0, -1):\r\n cols.append(df.shift(i))\r\n names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]\r\n # forecast sequence (t, t+1, ... 
t+n)\r\n for i in range(0, n_out):\r\n cols.append(df.shift(-i))\r\n if i == 0:\r\n names += [('var%d(t)' % (j+1)) for j in range(n_vars)]\r\n else:\r\n names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]\r\n # put it all together\r\n agg = concat(cols, axis=1)\r\n agg.columns = names\r\n # drop rows with NaN values\r\n if dropnan:\r\n agg.dropna(inplace=True)\r\n return agg\r\n \r\n#Read the csv file\r\ndef read_and_process_csv (name):\r\n \r\n # Read the data from CSV file \r\n dataset = pd.read_csv(name, sep=';', usecols=PROCESS_COLUMNS, parse_dates = [['Date', 'TimeID']], index_col=0, date_parser=parse);\r\n dataset.index.name = 'date'\r\n dataset = dataset.reindex(columns = REINDEX_COLUMNS);\r\n \r\n return dataset;\r\n \r\n#Prepare the data for LSTM networks\r\ndef get_data (name):\r\n \r\n #Get the CSV file\r\n dataset = read_and_process_csv(name);\r\n values = dataset.values\r\n \r\n # ensure all data is float\r\n values = values.astype('float32')\r\n \r\n # normalize features\r\n #scaler = MinMaxScaler(feature_range=(0, 1))\r\n #scaled = scaler.fit_transform(values)\r\n \r\n reframed = series_to_supervised(values, nr_time_ids, 1)\r\n values = reframed.values\r\n \r\n # split into train and test sets\r\n #n_train_hours\r\n n_train_hours = int(values.shape[0]*percentage_training);\r\n train = values[:n_train_hours, :]\r\n test = values[n_train_hours:, :]\r\n \r\n # split into input and outputs\r\n n_obs = nr_time_ids * n_features\r\n train_X, train_y = train[:, :n_obs], train[:, -n_features]\r\n test_X, test_y = test[:, :n_obs], test[:, -n_features]\r\n \r\n \r\n # reshape input to be 3D [samples, timesteps, features]\r\n train_X = train_X.reshape((train_X.shape[0], nr_time_ids, n_features))\r\n test_X = test_X.reshape((test_X.shape[0], nr_time_ids, n_features))\r\n\r\n return train_X, train_y, test_X, test_y;\r\n \r\n\r\ndef save_model (model, name):\r\n # serialize model to JSON\r\n model_json = model.to_json()\r\n with open(model_root_folder+name+\".json\", \"w\") as json_file:\r\n json_file.write(model_json)\r\n\r\n model.save_weights(model_root_folder+name+\".h5\")\r\n \r\ndef load_model (name):\r\n \r\n # load json and create model\r\n json_file = open(model_root_folder+name+'.json', 'r')\r\n loaded_model_json = json_file.read()\r\n json_file.close()\r\n loaded_model = model_from_json(loaded_model_json)\r\n # load weights into new model\r\n loaded_model.load_weights(model_root_folder+name+\".h5\")\r\n loaded_model.compile(loss=loss, optimizer=optimizer, metrics=metrics);\r\n \r\n return loaded_model\r\n\r\ndef evaluate_model():\r\n \r\n _, _, test_X, test_y = get_data(csv_file_name);\r\n model = load_model(model_name);\r\n \r\n #Evaluate the model\r\n score = model.predict(test_X)\r\n \r\n # calculate RMSE\r\n rmse = sqrt(mean_squared_error(score, test_y))\r\n print('Test RMSE: %.3f' % rmse)\r\n \r\n \r\n pyplot.plot(test_y)\r\n pyplot.plot(score)\r\n pyplot.ylabel('Number of parking spaces')\r\n pyplot.xlabel('Numebr of records')\r\n pyplot.legend(['Test Data', 'Predicted Data'], loc='upper left')\r\n pyplot.show()\r\n\r\n print(\"Model was evaluated\");\r\n \r\ndef train_lstm():\r\n \r\n train_X, train_y, test_X, test_y = get_data(csv_file_name);\r\n \r\n # design network\r\n model = Sequential()\r\n model.add(LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2])))\r\n model.add(Dense(1))\r\n model.compile(loss=loss, optimizer=optimizer, metrics=metrics)\r\n \r\n\r\n model.fit(train_X, train_y, epochs=epochs, batch_size=batch_size, validation_data=(test_X, 
test_y), verbose=1, shuffle=False)\r\n\r\n save_model(model, model_name);\r\n print(\"Training the model has finished\");\r\n \r\n \r\n \r\ntrain_lstm();\r\nevaluate_model();","repo_name":"iklandev/DeepLearning","sub_path":"Old/lstm.py","file_name":"lstm.py","file_ext":"py","file_size_in_byte":5726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19551262028","text":"\"\"\"Demonstrates dictionaries.\nFrom: https://qr.ae/TWCAvj:\nPython uses dictionaries all over the place:\n- the variables and functions in a module - stored in a dictionary # can be shown using globals()\n- the local variables in a function - stored in a dictionary # can be shown using locals(); see functions.py\n- the implementation of a function - a dictionary\n- a class is a dictionary\n- an instance of a class is another dictionary\n- the modules your program has imported - you guessed it - another dictionary\n- even Python set objects are implemented as modified dictionaries\nTo paraphrase Tim Peter's 'Zen of Python': \"dictionaries are great - let's do more of them\".\nRead more at https://qr.ae/TWCAvj.\n\"\"\"\n\n\n#%%\ndef demonstrate_dictionaries():\n \"\"\"Creating and using dictionaries.\n - create a blank (empty) dictionary\n - create a non-empty dictionary\n - access dictionary values by the corresponding keys (syntax: value = d[key])\n - print a non-empty dictionary\n - print all items using the items() function\n - print one item per line\n - pprint dictionary in one column\n - add/remove items to/from a dictionary\n - update a dictionary with the items from another dictionary or from an iterable of (k, v) pairs using dict.update()\n - using the keys() and values() functions\n \"\"\"\n\n # mick = {}\n # print(type(mick))\n # print(mick)\n # print()\n\n glimmer_twins = {'mick': 'Mick Jagger', 'keith': 'Keith Richards', 'birth_year': 1943}\n print(glimmer_twins)\n print()\n\n # mick = glimmer_twins['mick']\n # print(mick)\n # print()\n\n # # print(glimmer_twins.items())\n # for k, v in glimmer_twins.items():\n # print(k + ':', v)\n # print()\n\n # from pprint import pprint\n # pprint(glimmer_twins, width=1)\n # print()\n\n glimmer_twins['band'] = 'The Rolling Stones'\n # print(glimmer_twins.items())\n # print(glimmer_twins)\n # print()\n\n # del glimmer_twins['band']\n # print(glimmer_twins)\n # print()\n\n other = {'age': 80, 'city': 'Dartford'}\n songs = [('angie', 'Angie'), ('no_expectations', 'No Expectations')]\n\n # # glimmer_twins.update(other)\n # # print(glimmer_twins)\n # # print()\n # # glimmer_twins.update(songs)\n # # # glimmer_twins.update([('other', other)]) # not possible, in update() the v in (k, v) must be str or int\n # glimmer_twins['other'] = other\n # print(glimmer_twins)\n # print(glimmer_twins['other']['age'])\n # print()\n\n print(glimmer_twins.keys())\n print(glimmer_twins.values())\n\n\n#%%\n# Test demonstrate_dictionaries()\ndemonstrate_dictionaries()\n\n\n#%%\ndef sort_dictionary(d, by):\n \"\"\"Sorting a dictionary by keys or by values.\n - using zip()\n - using operator.itemgetter()\n - using lambda\n \"\"\"\n\n # if by == 'k' or by == 'K':\n # return dict(sorted(zip(d.keys(), d.values())))\n # if by == 'v' or by == 'V':\n # return dict(sorted(zip(d.values(), d.keys())))\n # return None\n\n # from operator import itemgetter\n #\n # if by == 'k' or by == 'K':\n # return dict(sorted(d.items(), key=itemgetter(0)))\n # if by == 'v' or by == 'V':\n # return dict(sorted(d.items(), key=itemgetter(1)))\n # return None\n\n if by 
== 'k' or by == 'K':\n return dict(sorted(d.items(), key=lambda x: x[0]))\n if by == 'v' or by == 'V':\n return dict(sorted(d.items(), key=lambda x: x[1]))\n return None\n\n\n#%%\ndef demonstrate_dict_sorting():\n \"\"\"Demonstrate sorting a dictionary.\n \"\"\"\n\n from pprint import pprint\n\n songs = {2: 'Angry', 1: 'Brown Sugar', 3: 'Jumpin\\' Jack Flash'}\n glimmer_twins = {'mick': 'Mick Jagger', 'keith': 'Keith Richards', 'birth_year': 1943}\n glimmer_twins = {'mick': 'Mick Jagger', 'keith': 'Keith Richards', 'birth_year': '1943'}\n\n # pprint(sort_dictionary(songs, by='k'))\n # pprint(sort_dictionary(songs, by='v'))\n # pprint(sort_dictionary(songs, by='d'))\n pprint(sort_dictionary(glimmer_twins, by='k'))\n pprint(sort_dictionary(glimmer_twins, by='v'))\n pprint(sort_dictionary(glimmer_twins, by='d'))\n\n\n#%%\n# Test demonstrate_dict_sorting()\ndemonstrate_dict_sorting()\n\n","repo_name":"programiranje3/2023","sub_path":"python/dictionaries.py","file_name":"dictionaries.py","file_ext":"py","file_size_in_byte":4157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6627073547","text":"import sys\nstring1 = ' '+ sys.stdin.readline().rstrip()\nstring2 = ' '+sys.stdin.readline().rstrip()\n\ndp_str=[[0]*(len(string2)) for _ in range((len(string1)))]\n#dp_str=[0]*len(string1)\nfor i in range(1, len(string1)):\n for j in range(1, len(string2)):\n\n if string1[i]==string2[j]:\n dp_str[i][j]=dp_str[i-1][j-1]+1\n else:\n dp_str[i][j]=max(dp_str[i-1][j], dp_str[i][j-1])\nprint(dp_str)\nprint(dp_str[len(string1)-1][len(string2)-1])\n\n\"\"\"\nThis is the classic LCS (longest common subsequence) problem, solved with DP.\nAs the code shows, whenever the two characters match, we take the value to the\nupper-left and add 1; otherwise we keep the larger of the left and upper cells.\nBy tracing back through the final dp_str array you can even recover which\ncharacters produced the match.\"\"\"","repo_name":"kkkmj/algorithm_study","sub_path":"문자열/9251.py","file_name":"9251.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36474581871","text":"from typing import Tuple\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nimport tensorflow\nfrom keras.utils import vis_utils\nfrom sklearn.metrics import confusion_matrix\n\n\ndef predict(model: tensorflow.keras.models, test_data: np.ndarray, softmax: bool = False) -> np.ndarray:\n \"\"\"\n Predict the results of the model for the given test_data\n Args:\n model: Model that is used to predict the results\n test_data: Data for which the labels are predicted\n softmax: Boolean whether the model returns the softmax predictions or argmax\n Returns:\n predictions: np array containing the predictions\n \"\"\"\n predictions = model.predict(test_data)\n if not softmax:\n # softmax means that the percentages for 0 and 1 are returned\n predictions = np.argmax(predictions, axis=1)\n return predictions\n\n\ndef average_of_neighbors(predicted_labels: np.ndarray, neighbors: int) -> np.ndarray:\n \"\"\"\n Postprocessing method used in the original paper.\n Args:\n predicted_labels: Argmax labels of the predicted labels\n neighbors: int which is the number of neighbors of which the average is taken. 
Is split equally on both sides.\n Default value for this in the paper is 5, which means 2 on each side.\n Returns:\n predicted_post_processing: Updated predicted labels\n \"\"\"\n neighbors_site = int(neighbors/2) # get the neighbors on each site, if uneven number round to lower value\n print(f\"Postprocessing using {neighbors_site} neighbors\")\n predicted_post_processing = []\n for pred_idx in range(len(predicted_labels)):\n start = max(pred_idx - neighbors_site, 0)\n end = min(len(predicted_labels), pred_idx + neighbors_site + 1) # +1 as last element is not included in slice\n avg = np.average(predicted_labels[start:end])\n prediction = int(np.rint(avg)) # round to either 0 or 1\n predicted_post_processing.append(prediction)\n predicted_post_processing = np.array(predicted_post_processing)\n return predicted_post_processing\n\n\ndef plot_confusion_matrix(predicted_labels: np.ndarray, y_test: np.ndarray,\n percentage: bool = False) -> confusion_matrix:\n \"\"\"\n Function to plot a confusion matrix. If percentage is set, the values are given as percentages\n instead of total numbers.\n Args:\n predicted_labels: List of predicted labels\n y_test: Array containing the true labels\n percentage: If True, prints confusion matrix with percentages, otherwise with whole numbers\n Returns:\n cf_matrix: Confusion matrix\n \"\"\"\n correct_labels = np.argmax(y_test, axis=1)\n # predictions_reshaped = predictions.reshape(-1, 2)\n # correct_reshaped = y_test.reshape(-1, 2)\n\n cf_matrix = confusion_matrix(correct_labels, predicted_labels)\n if percentage:\n conf_matrix = np.array(cf_matrix)\n cf_matrix = conf_matrix / (conf_matrix[0][0] + conf_matrix[0][1] + conf_matrix[1][0] + conf_matrix[1][1])\n\n print(cf_matrix)\n plt.figure(figsize=(16, 9))\n sns.heatmap(cf_matrix, annot=True, xticklabels=['no speech', 'speech'],\n yticklabels=['no speech', 'speech'], fmt='g')\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.show()\n return cf_matrix\n\n\ndef calculate_hter(cf_matrix: confusion_matrix) -> Tuple[float, float, float]:\n \"\"\"\n Calculates HTER from confusion matrix\n Args:\n cf_matrix: Confusion matrix of predicted and true labels\n Returns:\n hter: Half-Total Error Rate\n mr: Miss Rate\n far: False Alarm Rate\n \"\"\"\n mr = cf_matrix[1][0]/(sum(cf_matrix[1]))\n far = cf_matrix[0][1]/(sum(cf_matrix[0]))\n hter = (mr+far)/2*100\n return hter, mr, far\n\n\ndef visualize_model(model: tensorflow.keras.models) -> None:\n \"\"\"\n Function to visualize the model using vis_utils from keras.\n Args:\n model: Model that will be visualized\n \"\"\"\n vis_utils.plot_model(model, rankdir='TB',\n to_file='test.png',\n show_shapes=True,\n show_layer_names=True)\n","repo_name":"nadbot/Exploring-Convolutional-Neural-Networks-for-Voice-Activity-Detection","sub_path":"evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":4163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38095600075","text":"import os\nimport json\nimport boto3\nfrom decimal import *\nfrom datetime import datetime as dt\n\n# setting up client for dynamodb\ntable_name = os.getenv('DYNAMODB_TABLE')\ndynamodb = boto3.resource('dynamodb')\ntable = dynamodb.Table(table_name)\n\n# scanning dynamodb table for items and get count\ndef scan_result():\n scan = table.scan()\n return scan['Items'], scan['ScannedCount']\n\ndef lambda_handler(message, context):\n \n items, count = scan_result()\n \n sequence_label = message['Payload']['sequenceLabel']\n 
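# Editor's note (illustrative, not from the source): scan_result() returns the\n # unsorted items plus their count, e.g. [{'fib_value': '3'}, {'fib_value': '5'}];\n # the branch below keeps the two largest values as seeds for the next number.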
\n # base case: if zero or one values exist, return 0 and db item count\n if(count <= 1):\n first_val, second_val = 0, count\n else:\n # sorting items based on fib_val descending to get 2 max values\n get_fib_value = lambda i: int(i['fib_value'])\n max_vals = sorted(items, key = get_fib_value, reverse=True)[:2]\n first_val = max_vals[0]['fib_value']\n second_val = max_vals[1]['fib_value']\n\n\n response = {\n 'sequenceLabel': sequence_label,\n 'firstVal': str(first_val),\n 'secondVal': str(second_val),\n 'timestamp': dt.now().strftime(\"%Y-%m-%d %H-%M-%S\"),\n 'statusCode': 200\n }\n \n return response\n\n","repo_name":"alymaquiling/fib-cdkpipelines","sub_path":"app-code/gsa-lambda-b.py","file_name":"gsa-lambda-b.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"16070689766","text":"#!/usr/env\nfrom rosa_util import *\n#get O3 from FASTA, note that a \"directed graph\" is required\n\ndef edge(seqnamelist,seqlist,k):\n\tedges = []\n\ttmppair = []\n\tfor i in range(len(seqnamelist)):\n\t\tfor j in range(len(seqnamelist)):\n\t\t\t# check if the suffix k-mer of the i-th sequence = the prefix of the j-th\n\t\t\tif seqlist[i][-k:] == seqlist[j][:k]:\n\t\t\t\t# ensure the two sequences are not the same\n\t\t\t\ttmppair = [seqnamelist[i],seqnamelist[j]]\n\t\t\t\tif len(seqlist[i]) != len(seqlist[j]):\n\t\t\t\t\t# check not duplicated\n\t\t\t\t\tif tmppair not in edges:\n\t\t\t\t\t\t#print seqnamelist[i]+' '+seqnamelist[j]\n\t\t\t\t\t\twriteResult(' '.join(tmppair)+'\\n')\n\t\t\t\t\t\tedges.append(tmppair)\n\t\t\t\telse:\n\t\t\t\t\tfor p in range(len(seqlist[i])):\n\t\t\t\t\t\tif seqlist[i][p] != seqlist[j][p]:\n\t\t\t\t\t\t\tif tmppair not in edges:\n\t\t\t\t\t\t\t\t#print seqnamelist[i]+' '+seqnamelist[j]\n\t\t\t\t\t\t\t\twriteResult(' '.join(tmppair)+'\\n')\n\t\t\t\t\t\t\t\tedges.append(tmppair)\n\t\t\t\t\t\t\tbreak\n\treturn edges\n\nflushResult()\nf = readFASTA('data/rosalind_grph.txt')\nedge(f[0],f[1],3)\n","repo_name":"ccneko/Rosalind","sub_path":"grph.py","file_name":"grph.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73888358132","text":"from typing import List\n\nclass Solution:\n def findContentChildren(self, g: List[int], s: List[int]) -> int:\n # give the smallest child the smallest cookie which is >= the child's hungry degree\n children = sorted(g)\n cookie = sorted(s)\n i, j = 0, 0\n res = 0\n while i < len(children) and j < len(cookie):\n if cookie[j] >= children[i]:\n res += 1\n j += 1\n i += 1\n else:\n j += 1\n return res\n\n def findContentChildren(self, g: List[int], s: List[int]) -> int:\n # give the smallest child the smallest cookie which is >= the child's hungry degree\n children = sorted(g)\n cookies = sorted(s)\n child, cookie = 0, 0\n while child < len(children) and cookie < len(cookies):\n if children[child] <= cookies[cookie]:\n child += 1\n cookie += 1\n return child\n\nif __name__ == '__main__':\n s = Solution()\n print(s.findContentChildren([1,2,3], [1,1])) # 1\n print(s.findContentChildren([1,2], [1,2,3])) # 2\n\n\n\n\n ","repo_name":"xiaofanc/leetcode","sub_path":"0455-assign-cookies.py","file_name":"0455-assign-cookies.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21817818363","text":"import logging\n\nfrom PySide import QtSql\nfrom PySide 
import QtGui\nfrom PySide import QtCore\nfrom PySide.QtCore import QModelIndex\n\nfrom multiParentTree import MultiParentTree\n\n\ndef getFilterFromIDs(filterIDs, field):\n if filterIDs is None or len(filterIDs) == 0:\n return \"\"\n filterIDs = list(filterIDs)\n if len(filterIDs) == 1:\n return field + \"=%d\" % filterIDs[0]\n else:\n filter = field + \"=%d\" % filterIDs[0]\n for ID in filterIDs[1:]:\n logging.debug(field + \"=%d\" % ID)\n filter = filter + \" OR \" + field + \"=%d\" % ID\n return filter\n\n\nclass TagParentsModel(QtSql.QSqlTableModel):\n def __init__(self, db):\n super(TagParentsModel, self).__init__(None, db)\n self.setTable(\"tag_parents\")\n self.select()\n logging.debug(\"%d rows in TagParentsModel\" % self.rowCount())\n\n def addParentTag(self, tagID, parentTagID):\n record = QtSql.QSqlRecord()\n record.append(QtSql.QSqlField(\"ID\"))\n record.append(QtSql.QSqlField(\"tagID\"))\n record.append(QtSql.QSqlField(\"parentTagID\"))\n record.setValue(1, tagID)\n record.setValue(2, parentTagID)\n return self.insertRecord(self.rowCount() - 1, record)\n\n\nclass TagModel(QtSql.QSqlTableModel):\n def __init__(self, db):\n super(TagModel, self).__init__(None, db)\n self.setTable(\"tags\")\n self.select()\n logging.debug(\"%d rows in TagModel\" % self.rowCount())\n\n self.tagParentsModel = TagParentsModel(db)\n\n self.tree = MultiParentTree()\n self.updateTree()\n\n def addTag(self, name):\n self.setFilter(\"\")\n self.setSort(0, QtCore.Qt.SortOrder.AscendingOrder)\n self.select()\n\n record = QtSql.QSqlRecord()\n record.append(QtSql.QSqlField(\"ID\"))\n record.append(QtSql.QSqlField(\"name\"))\n record.setValue(1, name)\n if not self.insertRecord(self.rowCount() - 1, record):\n raise RuntimeError(\"Tag could not be inserted.\")\n newID = int(self.record(self.rowCount() - 1).value(0))\n logging.debug(\"newID=%d\" % newID)\n\n return newID\n\n def removeTag(self, ID):\n self.setFilter(\"ID=%d\" % ID)\n self.select()\n logging.debug(\"removeTag: self.rowCount()=%d\" % self.rowCount())\n if self.rowCount() == 1:\n self.removeRow(0)\n\n def getTagNameFromID(self, ID):\n elem = self.tree.getElementByID(ID)\n if elem is not None:\n return elem.data\n\n def hasID(self, ID):\n return self.tree.hasID(ID)\n\n def getChildIDs(self, ID, filterIDs=None):\n #TODO: Implement filter\n logging.debug(\"getChildIDs(%d)\" % ID)\n if not self.tree.hasID(ID):\n return []\n return self.tree.getElementByID(ID).getChildIDs()\n\n def getAllChildIDs(self, ID):\n childIDs = self.getChildIDs(ID)\n allChildIDs = childIDs\n for childID in childIDs:\n allChildIDs.extend(self.getAllChildIDs(childID))\n return allChildIDs\n\n def fillTreeWidgetWithTags(self, treeWidget, checkable=False, IDstoCheck=set(), filterIDs=None):\n self.updateTree(filterIDs)\n\n # Insert root tags\n rootTags = self.getRootTags(filterIDs)\n logging.debug(\"rootTags=%s\" % str(rootTags))\n for rootID in sorted(rootTags.keys(), reverse=True):\n newElem = QtGui.QTreeWidgetItem([rootTags[rootID], str(rootID)])\n if checkable:\n newElem.setFlags(newElem.flags() | QtCore.Qt.ItemIsUserCheckable)\n newElem.setCheckState(0, QtCore.Qt.Checked)\n if rootID not in IDstoCheck:\n newElem.setCheckState(0, QtCore.Qt.Unchecked)\n else:\n self.expandDownToRoot(newElem)\n treeWidget.insertTopLevelItem(0, newElem)\n self.insertChildTags(rootID, newElem, checkable, IDstoCheck, filterIDs)\n\n def insertChildTags(self, ID, treeWidgetItem, checkable, IDstoCheck, filterIDs):\n if not self.hasID(ID):\n return\n\n childTags = self.getChildTags(ID, filterIDs)\n 
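# Editor's note (illustrative values): childTags maps child tag IDs to names,\n # e.g. {3: 'python', 7: 'qt'}; each child is added to the widget and recursed into.\n 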
logging.debug(self.getTagNameFromID(ID) + \": \" + str(childTags))\n for childID in childTags.keys():\n if self.hasID(childID):\n newElem = QtGui.QTreeWidgetItem(treeWidgetItem, [self.getTagNameFromID(childID), str(childID)])\n if checkable:\n newElem.setFlags(newElem.flags() | QtCore.Qt.ItemIsUserCheckable)\n newElem.setCheckState(0, QtCore.Qt.Checked)\n if childID not in IDstoCheck:\n newElem.setCheckState(0, QtCore.Qt.Unchecked)\n else:\n self.expandDownToRoot(newElem)\n if self.hasChildTags(childID):\n self.insertChildTags(childID, newElem, checkable, IDstoCheck, filterIDs)\n\n def expandDownToRoot(self, treeWidgetItem):\n treeWidgetItem.setExpanded(True)\n if treeWidgetItem.parent() is not None:\n self.expandDownToRoot(treeWidgetItem.parent())\n\n def getRootTags(self, filterIDs):\n #TODO: Implement filter\n return self.tree.getRootElementsDict()\n\n def getChildTags(self, ID, filterIDs=None):\n #TODO: Implement filter\n return self.tree.getElementByID(ID).getChildDict()\n\n def hasChildTags(self, ID):\n return self.tree.getElementByID(ID).hasParents()\n\n def getIDsFilteredByName(self, name):\n self.setFilter('name LIKE \"%' + name + '%\"')\n self.select()\n logging.debug(\"Found %d tags with name %s\" % (self.rowCount(), name))\n IDs = [self.record(i).value(\"ID\") for i in range(self.rowCount())]\n return IDs\n\n def getParentIDs(self, ID):\n if self.tree.getElementByID(ID) is not None:\n return self.tree.getElementByID(ID).getParentIDs()\n else:\n return []\n\n def getParentIDsDownToRoot(self, ID):\n allParentIDs = []\n if self.tree.getElementByID(ID) is not None:\n allParentIDs = self.tree.getElementByID(ID).getParentIDs()\n for parentID in allParentIDs:\n allParentIDs.extend(self.getParentIDsDownToRoot(parentID))\n return allParentIDs\n\n def getParentsTags(self, tagID):\n return self.tree.getElementByID(tagID).getParentIDs()\n\n def updateTree(self, filterIDs=None):\n self.tree = MultiParentTree()\n\n self.setFilter(getFilterFromIDs(filterIDs, \"ID\"))\n self.select()\n logging.debug(\"FILTER=\" + self.filter())\n logging.debug(\"self.rowCount()=%d\" % self.rowCount())\n for i in range(self.rowCount()):\n ID = self.record(i).value(\"ID\")\n tag = self.record(i).value(\"name\")\n logging.debug(\"ID=%d\" % ID)\n logging.debug(\"tag=%s\" % tag)\n self.tree.insertElement(ID, tag)\n\n self.tagParentsModel.setFilter(getFilterFromIDs(filterIDs, \"tagID\"))\n self.tagParentsModel.select()\n for i in range(self.tagParentsModel.rowCount()):\n parentID = self.tagParentsModel.record(i).value(\"parentTagID\")\n childID = self.tagParentsModel.record(i).value(\"tagID\")\n self.tree.setRelationship(parentID, childID)\n","repo_name":"j-kallwies/IKnow","sub_path":"iknow/tagModel.py","file_name":"tagModel.py","file_ext":"py","file_size_in_byte":7233,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"23842153571","text":"# IMPORT STATEMENTS\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LinearRegression\n\n# GET REQUEST & HTML CONTENT\nurl = 'https://www.cryptoslam.io/nftglobal'\nresponse = requests.get(url)\n\n# PARSE HTML CONTENT BY BS4\nsoup = BeautifulSoup(response.content, 'html.parser')\n\n# LOCATE TABLE\ntable = soup.find('table')\n\n# CONVERT TABLE ROWS & COLUMNS TO DICT\ntable_rows = table.find_all('tr')\ndata = []\nfor row in table_rows:\n cells = row.find_all('td')\n if len(cells) > 0:\n item = {\n 'Name': 
cells[0].text.strip(),\n 'Price': float(cells[1].text.strip().replace('$', '').replace(',', '')),\n 'Sales Volume': int(cells[2].text.strip().replace(',', '')),\n 'Sales Value': float(cells[3].text.strip().replace('$', '').replace(',', '')),\n 'Owners': int(cells[4].text.strip().replace(',', '')),\n 'Buyers': int(cells[5].text.strip().replace(',', ''))\n }\n data.append(item)\n\n# PANDAS DATAFRAME\ndf = pd.DataFrame(data)\n","repo_name":"rykalc/STAT4185_Final_Project","sub_path":"FinalProjectCode1.py","file_name":"FinalProjectCode1.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71895042934","text":"# The Nature of Code - Daniel Shiffman http://natureofcode.com\n# Example 3-4 b: Pendulum Simulation\n# PyP5 port by: Yogesh Kulkarni\n# Adopted from processing.py based implementation at:\n# https://github.com/nature-of-code/noc-examples-python/blob/master/chp03_oscillation/NOC_3_10_PendulumExample\n# But followed on screen example\n# Reference Youtube Video: https://www.youtube.com/watch?v=rqecAdEGW6I&list=PLRqwX-V7Uu6aFlwukCmDf0-1-uSR7mklK&index=22\n\n# Pendulum\n#\n# A simple pendulum simulation\n# Given a pendulum with an angle theta (0 being the pendulum at rest) and\n# a radius r we can use sine to calculate the angular component of the\n# gravitational force.\n#\n# Gravity Force = Mass * Gravitational Constant;\n# Pendulum Force = Gravity Force * sine(theta)\n# Angular Acceleration =\n# Pendulum Force / Mass = gravitational acceleration * sine(theta);\n#\n# Note this is an ideal world scenario with no tension in the pendulum arm,\n# a more realistic formula might be:\n# Angular Acceleration = (g / R) * sine(theta)\n#\n# For a more substantial explanation, visit:\n# http://www.myphysicslab.com/pendulum1.html\n\nfrom p5 import *\n\n\nclass Pendulum(object):\n \"\"\"\n A Simple Pendulum Class\n Includes functionality for user can click and drag the pendulum\n \"\"\"\n\n def __init__(self, origin, r):\n \"\"\"\n This constructor could be improved to allow a greater variety of\n pendulums\n \"\"\"\n\n # position of pendulum ball\n self.position = Vector(0,0)\n\n # position of arm origin\n self.origin = origin.copy()\n\n # Length of arm\n self.r = r\n\n # Pendulum arm angle\n self.angle = PI / 4\n\n # Angle velocity\n self.aVelocity = 0.0\n\n # Angle acceleration\n self.aAcceleration = 0.0\n\n # Arbitrary ball radius\n self.ballr = 48\n\n # Arbitary damping amount\n self.damping = 0.995\n\n self.dragging = False\n\n def go(self):\n self.update()\n self.drag() # for user interaction\n self.display()\n\n def update(self):\n \"\"\"\n Function to update position\n \"\"\"\n # As long as we aren't dragging the pendulum, let it swing!\n if not self.dragging:\n # Arbitrary constant\n gravity = 0.4\n\n # Calculate acceleration\n # (see: http://www.myphysicslab.com/pendulum1.html)\n self.aAcceleration = (-1 * gravity / self.r) * sin(self.angle)\n\n # Increment velocity\n self.aVelocity += self.aAcceleration\n\n # Arbitrary damping\n self.aVelocity *= self.damping\n\n # Increment angle\n self.angle += self.aVelocity\n\n def display(self):\n # Polar to cartesian conversion\n self.position = Vector(self.r * sin(self.angle), self.r * cos(self.angle), 0)\n\n # Make sure the position is relative to the pendulum's origin\n self.position += self.origin\n\n stroke(0)\n strokeWeight(2)\n\n # Draw the arm\n line(self.origin.x, self.origin.y, self.position.x, self.position.y)\n ellipseMode(CENTER)\n 
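# Editor's note: the ball is drawn grey by default and repainted black below\n # while the user is dragging it.\n 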
fill(175)\n\n if self.dragging:\n fill(0)\n\n # Draw the ball\n ellipse(self.position.x, self.position.y, self.ballr, self.ballr)\n\n # The methods below are for mouse interaction\n\n def clicked(self, mx, my):\n \"\"\"\n This checks to see if we clicked on the pendulum ball\n \"\"\"\n m = Point(mx,my)\n pos = Point(self.position.x,self.position.y)\n d = distance(m, pos)\n if d < self.ballr:\n self.dragging = True\n\n def stopDragging(self):\n \"\"\"\n This tells us we are no longer clicking on the ball.\n \"\"\"\n # No velocity once you let go\n self.aVelocity = 0\n self.dragging = False\n\n def drag(self):\n # If we are dragging the ball, we calculate the angle between the\n # pendulum origin and the mouse position and assign that angle to\n # the pendulum\n if self.dragging:\n # Difference between 2 points\n diff = self.origin - Vector(mouse_x, mouse_y)\n # Angle relative to vertical axis\n self.angle = atan2(-1 * diff.y, diff.x) - radians(90)\n\ndef setup():\n size(640, 360)\n\n # Make a new Pendulum with an origin position and armlength\n global p\n p = Pendulum(Vector(width / 2, 0), 175)\n\n\ndef draw():\n background(255)\n p.go()\n\n\ndef mousePressed():\n p.clicked(mouse_x, mouse_y)\n\n\ndef mouseReleased():\n p.stopDragging()\n\nif __name__ == \"__main__\":\n run()","repo_name":"yogeshhk/TheNatureOfCode","sub_path":"src/noc_3_4_b_pendulumsimulation.py","file_name":"noc_3_4_b_pendulumsimulation.py","file_ext":"py","file_size_in_byte":4593,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"17688474686","text":"from keras.models import Sequential\nfrom keras.models import Model, Sequential, load_model\nfrom keras.layers import Activation, concatenate, Dense, Flatten, Dropout, Reshape, Input, Add, RepeatVector, Permute\nfrom keras import regularizers\nfrom keras.layers import LSTM, Dense, Activation, Input\nfrom keras.layers import TimeDistributed, GaussianNoise, GaussianDropout, Dropout, Flatten\nfrom keras.models import Model\nfrom keras import backend as K\nfrom i3d_inception import Inception_Inflated3d, conv3d_bn\nimport keras\nimport numpy as np\nimport tensorflow as tf\nfrom keras.layers import BatchNormalization\nfrom keras.layers import Conv3D\nfrom keras.layers import MaxPooling3D\nfrom keras.layers import AveragePooling3D\nfrom keras.layers import Lambda\nfrom keras.layers import GlobalAveragePooling3D\nfrom keras.models import Model, Sequential, load_model\nfrom keras import layers \nfrom pi3d import PI3D\n\nf_dept = 832\nno_of_p = 0\n\ndef inflate_dense(x):\n a = RepeatVector(8*7*7*f_dept)(x)\n a = Permute((2,1), input_shape=(8*7*7*f_dept, no_of_p))(a)\n a = Reshape((no_of_p,8,7,7,f_dept))(a)\n return a\n\ndef sum_feature(x):\n return K.sum(x, axis=1)\n\ndef concat_feature(x):\n a = Permute((2,3,4,5,1), input_shape=(no_of_p,8,7,7,f_dept))(x)\n a = Reshape((8,7,7,no_of_p*f_dept))(a)\n return a \n\ndef l1_reg(weight_mat):\n return 0.001*K.sum(K.square(weight_mat))\n\ndef pi3d_model(fc_main, model_inputs, dataset, protocol, all_models_name=[], mode='sum', dropout_prob=0.0, num_classes=60, sum_idx=0, train_end_to_end=False):\n mode = mode\n all_models_name=all_models_name\n #all_models = {}\n if sum_idx ==0 :\n global f_dept\n f_dept = 1024\n\n pi3d_interm_outputs = []\n for model_name in all_models_name:\n model = load_model('./weights_optim/{}/weights_{}_{}.hdf5'.format(dataset, model_name, protocol))\n for idx in range(len(model.layers)):\n model.get_layer(index=idx).name=model.layers[idx].name+'_'+model_name\n\n for l in 
model.layers:\n l.trainable=train_end_to_end\n\n model_inputs.append(model.input)\n if sum_idx <= 3 and sum_idx >= 0:\n pi3d_interm_outputs.append(Reshape((1,8,7,7,f_dept))(model.get_layer(index=-46+(2-sum_idx)*20).output))\n\n\n x = concatenate(pi3d_interm_outputs, axis=1)\n inflated_fc_main = keras.layers.core.Lambda(inflate_dense, output_shape=(no_of_p, 8, 7, 7, f_dept))(fc_main)\n multiplied_features = keras.layers.Multiply()([inflated_fc_main, x])\n\n if mode=='sum':\n x = keras.layers.core.Lambda(sum_feature, output_shape=(8, 7, 7, f_dept))(multiplied_features)\n elif mode=='cat':\n x = keras.layers.core.Lambda(concat_feature, output_shape=(8, 7, 7, f_dept*no_of_p))(multiplied_features)\n\n ##second part of I3D\n\n if sum_idx==2:\n # Mixed 5b\n branch_0 = conv3d_bn(x, 256, 1, 1, 1, padding='same', name=''+'second')\n\n branch_1 = conv3d_bn(x, 160, 1, 1, 1, padding='same', name='Conv3d_5b_1a_1x1'+'second')\n branch_1 = conv3d_bn(branch_1, 320, 3, 3, 3, padding='same', name='Conv3d_5b_1b_3x3'+'second')\n\n branch_2 = conv3d_bn(x, 32, 1, 1, 1, padding='same', name='Conv3d_5b_2a_1x1'+'second')\n branch_2 = conv3d_bn(branch_2, 128, 3, 3, 3, padding='same', name='Conv3d_5b_2b_3x3'+'second')\n\n branch_3 = MaxPooling3D((3, 3, 3), strides=(1, 1, 1), padding='same', name='MaxPool2d_5b_3a_3x3'+'second')(x)\n branch_3 = conv3d_bn(branch_3, 128, 1, 1, 1, padding='same', name='Conv3d_5b_3b_1x1'+'second')\n\n x = layers.concatenate(\n [branch_0, branch_1, branch_2, branch_3],\n axis=4,\n name='Mixed_5b'+'second')\n\n if sum_idx==1 or sum_idx==2:\n # Mixed 5c\n branch_0 = conv3d_bn(x, 384, 1, 1, 1, padding='same', name='Conv3d_5c_0a_1x1'+'second')\n\n branch_1 = conv3d_bn(x, 192, 1, 1, 1, padding='same', name='Conv3d_5c_1a_1x1'+'second')\n branch_1 = conv3d_bn(branch_1, 384, 3, 3, 3, padding='same', name='Conv3d_5c_1b_3x3'+'second')\n\n branch_2 = conv3d_bn(x, 48, 1, 1, 1, padding='same', name='Conv3d_5c_2a_1x1'+'second')\n branch_2 = conv3d_bn(branch_2, 128, 3, 3, 3, padding='same', name='Conv3d_5c_2b_3x3'+'second')\n\n branch_3 = MaxPooling3D((3, 3, 3), strides=(1, 1, 1), padding='same', name='MaxPool2d_5c_3a_3x3'+'second')(x)\n branch_3 = conv3d_bn(branch_3, 128, 1, 1, 1, padding='same', name='Conv3d_5c_3b_1x1'+'second')\n\n x = layers.concatenate(\n [branch_0, branch_1, branch_2, branch_3],\n axis=4,\n name='Mixed_5c'+'second')\n\n #Classification block\n x = AveragePooling3D((2, 7, 7), strides=(1, 1, 1), padding='valid', name='global_avg_pool'+'second')(x)\n x = Dropout(dropout_prob)(x)\n\n x = conv3d_bn(x, num_classes, 1, 1, 1, padding='same',\n use_bias=True, use_activation_fn=False, use_bn=False, name='Conv3d_6a_1x1'+'second')\n\n x = Flatten(name='flatten'+'second')(x)\n predictions = Dense(num_classes, activation='softmax', name='softmax'+'second')(x)\n model = Model(inputs=model_inputs, outputs=predictions, name = 'PI3D')\n \n model_second = Inception_Inflated3d(include_top = True, weights='rgb_imagenet_and_kinetics')\n \n weight_idx_s = -45 + (2-sum_idx)*20\n weight_idx_e = -4\n \n for l_m, l_lh in zip(model.layers[weight_idx_s: weight_idx_e], model_second.layers[weight_idx_s: weight_idx_e]):\n l_m.set_weights(l_lh.get_weights())\n l_m.trainable=True\n \n lstm_weights = \"./weights_optim/{}/lstm_model_{}.hdf5\".format(dataset, protocol)\n l_model = load_model(lstm_weights, compile=False)\n\n for idx1 in range(len(model.layers)):\n n1 = model.layers[idx1].name\n if 'lstm' in n1:\n for idx2 in range(len(l_model.layers)):\n n2 = l_model.layers[idx2].name\n if n1==n2:\n 
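# Editor's note: copy the pretrained LSTM layer's weights into the matching\n # attention-model layer, then stop scanning the pretrained model's layers.\n 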
model.layers[idx1].set_weights(l_model.layers[idx2].get_weights())\n break\n \n\n return model\n\ndef build_model_without_TS(dataset, protocol, n_neuron, n_dropout, batch_size, timesteps, data_dim, num_classes, all_models_name, training_mode='mid', attention_mode='sum', sum_idx=0, train_end_to_end=False) :\n print('Build model...') \n model_inputs=[]\n x1 = Input(shape=(timesteps, data_dim), name='skeleton_input') \n model_inputs.append(x1)\n\n global no_of_p\n no_of_p = len(all_models_name)\n\n main_lstm_1 = LSTM(n_neuron, return_sequences=True, trainable=False)(x1)\n main_lstm_2 = LSTM(n_neuron, return_sequences=True, trainable=False)(main_lstm_1)\n main_lstm_3 = LSTM(n_neuron, trainable=False)(main_lstm_2)\n main_lstm_dropped = Dropout(n_dropout, trainable=False, name='droput_1')(main_lstm_3)\n\n z = Dense(128, activation='tanh', name='z_layer',trainable=False)(main_lstm_dropped)\n fc_main = Dense(no_of_p, activity_regularizer=None, kernel_initializer='zeros', bias_initializer='zeros', activation='softmax',trainable=False, name='dense_1')(z)\n \n model = pi3d_model(fc_main, model_inputs, dataset, protocol, all_models_name, attention_mode, n_dropout, num_classes=num_classes, sum_idx=sum_idx, train_end_to_end=train_end_to_end) \n return model \n\ndef build_model_with_TS(n_neuron, n_dropout, batch_size, timesteps, data_dim, num_classes):\n print('Build model...') \n model = Sequential()\n model.add(LSTM(n_neuron, return_sequences=True, batch_input_shape=(batch_size, timesteps, data_dim)))\n model.add(LSTM(n_neuron, return_sequences=True))\n model.add(LSTM(n_neuron, return_sequences=True))\n model.add(Dropout(n_dropout))\n model.add(TimeDistributed(Dense(num_classes, activation='softmax')))\n return model\n\n","repo_name":"srijandas07/P-I3D","sub_path":"models_attention.py","file_name":"models_attention.py","file_ext":"py","file_size_in_byte":7667,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"21"} +{"seq_id":"5447140595","text":"#-*- coding: utf-8 -*-\n\nimport sys\nimport unittest\nfrom os import path\nfrom setuptools import setup, find_packages\n\n\ncurrent = path.abspath(path.dirname(__file__))\n\nwith open(path.join(current, 'README.md'), 'r') as readme:\n long_description = readme.read()\n\nexec(open('astair/version.py').read())\n\nsetup(\n name=\"asTair\",\n version=__version__,\n packages=find_packages(exclude=['tests']),\n install_requires=['Click >=7, < 8', 'pysam >= 0.15.0', 'pyahocorasick >= 1, < 2', 'numpy >= 1, < 2'],\n extras_require={'plot': [\"matplotlib\"],},\n test_suite='tests',\n #scripts=['./astair/safe_division.py', './astair/bam_file_parser.py', './astair/simple_fasta_parser.py', './astair/DNA_sequences_operations.py', './astair/context_search.py', './astair/context_search.py', './astair/statistics_summary.py'],\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, < 4',\n author=\"Gergana V. Velikova and Benjamin Schuster-Boeckler\",\n author_email=\"gergana_velikova@yahoo.com\",\n description=\"A tool for the analysis of bisulfite-free and base-resolution sequencing data generated with TET Assisted Pyridine borane Sequencing (TAPS), or other modified cytosine to thymine conversion methods (mCtoT). 
It also has some features for bisulfite sequencing data (unmodified cytosine to thymine conversion methods, CtoT).\", long_description=long_description,\n long_description_content_type='text/markdown',\n license=\"GPLv3\",\n entry_points={\n 'console_scripts':\n ['astair=scripts.run:cli']\n }, keywords=\"TAPS taps cytosine caller methylation modification WGBS RRBS bisulfite epigenetics\", url=\"https://bitbucket.org/bsblabludwig/astair/\", classifiers=['Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Intended Audience :: Science/Research', 'Topic :: Scientific/Engineering :: Bio-Informatics']\n )\n\n","repo_name":"1156054203/astair","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4493871783","text":"'''\nCreated on Mar 2, 2017\n\n@author: rch\n'''\n\nfrom builtins import len\nimport os\nimport tempfile\nfrom threading import Thread\n\nfrom matplotlib.figure import \\\n Figure\nfrom mayavi.core.ui.api import \\\n MayaviScene, SceneEditor, MlabSceneModel\nfrom pyface.api import GUI\nfrom reporter import ROutputSection\nfrom simulator.i_hist import IHist\nfrom traits.api import \\\n Str, Instance, Event, Enum, \\\n Tuple, List, Range, Int, Float, \\\n Property, cached_property, \\\n on_trait_change, Bool, Button, Directory, \\\n HasStrictTraits, WeakRef\nfrom traitsui.api import \\\n View, Item, UItem, VGroup, VSplit, \\\n HSplit, HGroup, Tabbed, ListEditor\nfrom traitsui.tabular_adapter import TabularAdapter\nfrom util.traits.editors import \\\n MPLFigureEditor\nfrom view.plot2d.viz2d import Viz2D\nfrom view.plot3d.viz3d import Viz3D\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport traits.api as tr\nfrom traitsui.api \\\n import TableEditor\nfrom traitsui.extras.checkbox_column \\\n import CheckboxColumn\nfrom traitsui.table_column \\\n import ObjectColumn\n\n\nclass RunThread(Thread):\n '''Time loop thread responsible.\n '''\n\n def __init__(self, vs, vot, *args, **kw):\n super(RunThread, self).__init__(*args, **kw)\n self.daemon = True\n self.vs = vs\n self.vot = vot\n\n def run(self):\n # print 'STARTING THREAD'\n GUI.invoke_later(self.vs.update_pipeline, self.vot)\n # print 'THREAD ENDED'\n\n\nclass Viz2DAdapter(TabularAdapter):\n # List of (Column labels, Column ID).\n columns = [\n ('Label', 'label'),\n ('Visible', 'visible'),\n ]\n\n\n# The tabular editor works in conjunction with an adapter class, derived from\n# the 'players' trait table editor:\nviz2d_list_editor = TableEditor(\n sortable=False,\n configurable=False,\n auto_size=False,\n selected='selected_viz2d',\n click='viz2d_list_editor_clicked',\n columns=[CheckboxColumn(name='visible', label='visible',\n width=0.12),\n ObjectColumn(name='name', editable=False, width=0.24,\n horizontal_alignment='left'),\n ])\n\n# The tabular editor works in conjunction with an adapter class, derived from\n# the 'players' trait table editor:\npp_list_editor = TableEditor(\n sortable=False,\n configurable=False,\n auto_size=False,\n selected='selected_pp',\n columns=[ObjectColumn(name='name', editable=False, width=0.24,\n horizontal_alignment='left'),\n ])\n\n# The tabular editor works in conjunction with an adapter class, derived from\n# the 'players' trait table editor:\nviz3d_list_editor = TableEditor(\n sortable=False,\n configurable=False,\n auto_size=False,\n 
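# Editor's note: same column layout as viz2d_list_editor above; the click\n # hook is left commented out below.\n 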
selected='selected_viz3d',\n # click='viz3d_list_editor_clicked',\n columns=[CheckboxColumn(name='visible', label='visible',\n width=0.12),\n ObjectColumn(name='name', editable=False, width=0.24,\n horizontal_alignment='left'),\n ])\n\n\nclass AnimationDialog(HasStrictTraits):\n '''Animation parameters for a diagram.\n '''\n sheet = WeakRef\n\n export_path = Directory\n status_message = Str('')\n\n animate_from = Float(0.0, auto_set=False, enter_set=True)\n animate_to = Float(1.0, auto_set=False, enter_set=True)\n animate_steps = Int(30, auto_set=False, enter_set=True)\n\n animate_button = Button(label='Animate selected diagram')\n\n def _animate_button_fired(self):\n\n if self.export_path == '':\n dir_ = tempfile.mkdtemp()\n else:\n dir_ = self.export_path\n name = self.sheet.selected_viz2d.name\n path = os.path.join(dir_, name)\n\n if os.path.exists(path):\n self.status_message = 'overwriting animation %s' % name\n else:\n os.makedirs(path)\n\n for i, vot in enumerate(np.linspace(self.animate_from,\n self.animate_to,\n self.animate_steps)):\n fname = os.path.join(path, 'step%03d.jpg' % i)\n self.sheet.selected_viz2d.savefig_animate(\n vot, fname,\n (self.sheet.fig_width,\n self.sheet.fig_height)\n )\n self.status_message = 'animation stored in %s' % path\n\n traits_view = View(\n VGroup(\n VGroup(\n HGroup(\n UItem('animate_from', full_size=True, springy=True),\n UItem('animate_to', springy=True),\n UItem('animate_steps', springy=True),\n ),\n label='Animation range'\n ),\n Item('export_path'),\n HGroup(\n UItem('status_message', style='readonly')\n ),\n UItem('animate_button',\n springy=False, resizable=True),\n ),\n buttons=['OK', 'Cancel'],\n title='Animation dialog'\n )\n\n\nclass PlotPerspective(HasStrictTraits):\n name = Str('')\n viz2d_list = List(Viz2D, input=True)\n positions = List([], input=True)\n twinx = List([], input=True)\n twiny = List([], input=True)\n data_changed = Event\n\n figure = Instance(Figure)\n\n tight_layout = Bool(True)\n\n def _figure_default(self):\n figure = Figure(facecolor='white')\n figure.set_tight_layout(self.tight_layout)\n return figure\n\n viz2d_axes = Property\n\n @cached_property\n def _get_viz2d_axes(self):\n return {viz2d: self.figure.add_subplot(loc)\n for viz2d, loc in zip(self.viz2d_list,\n self.positions)}\n\n twinx_axes = Property\n\n @cached_property\n def _get_twinx_axes(self):\n return {viz_2: self.viz2d_axes[viz_1].twinx()\n for viz_1, viz_2, _ in self.twinx}\n\n twiny_axes = Property\n\n @cached_property\n def _get_twiny_axes(self):\n return {viz_2: self.viz2d_axes[viz_1].twiny()\n for viz_1, viz_2, _ in self.twiny}\n\n axes = Property\n\n @cached_property\n def _get_axes(self):\n ad = {}\n ad.update(self.viz2d_axes)\n ad.update(self.twinx_axes)\n ad.update(self.twiny_axes)\n return ad\n\n def clear(self):\n for viz2d, ax in self.viz2d_axes.items():\n ax.clear()\n viz2d.reset(ax)\n for viz2d, ax in self.twinx_axes.items():\n ax.clear()\n viz2d.reset(ax)\n for viz2d, ax in self.twiny_axes.items():\n ax.clear()\n viz2d.reset(ax)\n\n def replot(self, vot):\n for viz2d, ax in self.axes.items():\n ax.clear()\n viz2d.clear()\n viz2d.plot(ax, vot)\n self.data_changed = True\n\n def align_xaxis(self):\n for v1, v2, alignx in self.twiny:\n if alignx:\n ax1 = self.viz2d_axes[v1]\n ax2 = self.twiny_axes[v2]\n self._align_xaxis(ax1, ax2)\n\n def _align_xaxis(self, ax1, ax2):\n \"\"\"Align zeros of the two axes, zooming them out by same ratio\"\"\"\n axes = (ax1, ax2)\n extrema = [ax.get_xlim() for ax in axes]\n tops = [extr[1] / (extr[1] - 
extr[0]) for extr in extrema]\n # Ensure that plots (intervals) are ordered bottom to top:\n if tops[0] > tops[1]:\n axes, extrema, tops = [list(reversed(l))\n for l in (axes, extrema, tops)]\n\n # How much would the plot overflow if we kept current zoom levels?\n tot_span = tops[1] + 1 - tops[0]\n\n b_new_t = extrema[0][0] + tot_span * (extrema[0][1] - extrema[0][0])\n t_new_b = extrema[1][1] - tot_span * (extrema[1][1] - extrema[1][0])\n axes[0].set_xlim(extrema[0][0], b_new_t)\n axes[1].set_xlim(t_new_b, extrema[1][1])\n\n trait_view = View(\n UItem('figure', editor=MPLFigureEditor(),\n resizable=True,\n springy=True,),\n )\n\n\nclass BMCSVizSheet(ROutputSection):\n '''Visualization sheet\n - controls the time displayed\n - contains several visualization adapters.\n This class could be called BMCSTV - for watching the time\n dependent response. It can have several channels - in 2D and 3D\n '''\n\n def __init__(self, *args, **kw):\n super(BMCSVizSheet, self).__init__(*args, **kw)\n self.on_trait_change(self.viz2d_list_items_changed,\n 'viz2d_list_items')\n\n name = Str\n\n hist = Instance(IHist)\n\n min = Float(0.0)\n '''Simulation start is always 0.0\n '''\n max = Float(1.0)\n '''Upper range limit of the current simulator.\n This range is determined by the time-loop range\n of the model. \n '''\n vot = Float\n\n def _vot_default(self):\n return self.min\n\n def _vot_changed(self):\n if self.hist:\n self.hist.vot = self.vot\n\n vot_slider = Range(low='min', high='max', step=0.01,\n enter_set=True, auto_set=False)\n '''Time line controlling the current state of the simulation.\n This value is synchronized with the control time of the\n time loop setting the tline. The vot_max = tline.max.\n The value of vot follows the value of tline.val in monitoring mode.\n By default, the monitoring mode is active with vot = tline.value.\n When sliding to a value vot < tline.value, the browser mode is activated.\n When sliding into the range vot > tline.value the monitoring mode\n is reactivated. \n '''\n\n def _vot_slider_default(self):\n return 0.0\n\n mode = Enum('monitor', 'browse')\n\n def _mode_changed(self):\n if self.mode == 'browse':\n self.offline = False\n\n time = Float(0.0)\n\n def time_range_changed(self, max_):\n self.max = max_\n\n def time_changed(self, time):\n self.time = time\n if self.mode == 'monitor':\n self.vot = time\n self.vot_slider = time\n\n def _vot_slider_changed(self):\n if self.mode == 'browse':\n if self.vot_slider >= self.time:\n self.mode = 'monitor'\n self.vot_slider = self.time\n self.vot = self.time\n else:\n self.vot = self.vot_slider\n elif self.mode == 'monitor':\n if self.vot_slider < self.time:\n self.mode = 'browse'\n self.vot = self.vot_slider\n else:\n self.vot_slider = self.time\n self.vot = self.time\n\n offline = Bool(True)\n '''If the sheet is offline, the plot refresh is inactive.\n The sheet starts in offline mode and is activated once the signal\n run_started has been received. Upon run_finished 
Upon run_finished the \n the sheet goes directly into the offline mode again.\n \n If the user switches to browser mode, the vizsheet gets online \n and reploting is activated.\n '''\n\n running = Bool(False)\n\n def run_started(self):\n self.running = True\n self.offline = False\n for pp in self.pp_list:\n pp.clear()\n self.mode = 'monitor'\n if self.reference_viz2d:\n ax = self.reference_axes\n ax.clear()\n self.reference_viz2d.reset(ax)\n\n def run_finished(self):\n self.skipped_steps = self.monitor_chunk_size\n # self.update_pipeline(1.0)\n self.replot()\n self.running = False\n self.offline = True\n\n monitor_chunk_size = Int(10, label='Monitor each # steps')\n\n skipped_steps = Int(1)\n\n @on_trait_change('vot,n_cols')\n def replot(self):\n if self.offline:\n return\n if self.running and self.mode == 'monitor' and \\\n self.skipped_steps < (self.monitor_chunk_size - 1):\n self.skipped_steps += 1\n return\n for pp in self.pp_list:\n pp.replot(self.vot)\n# for viz2d, ax in self.axes.items():\n# ax.clear()\n# viz2d.clear()\n# viz2d.plot(ax, self.vot)\n# if self.selected_pp:\n# self.selected_pp.align_xaxis()\n if self.reference_viz2d:\n ax = self.reference_axes\n ax.clear()\n self.reference_viz2d.clear()\n self.reference_viz2d.plot(ax, self.vot)\n self.data_changed = True\n self.skipped_steps = 0\n if self.mode == 'browse':\n self.update_pipeline(self.vot)\n else:\n up = RunThread(self, self.vot)\n up.start()\n\n viz2d_list = List(Viz2D)\n '''List of visualization adaptors for 2D.\n '''\n viz2d_dict = Property\n\n def _get_viz2d_dict(self):\n return {viz2d.name: viz2d for viz2d in self.viz2d_list}\n\n viz2d_names = Property\n '''Names to be supplied to the selector of the\n reference graph.\n '''\n\n def _get_viz2d_names(self):\n return list(self.viz2d_dict.keys())\n\n viz2d_list_editor_clicked = Tuple\n viz2d_list_changed = Event\n\n def _viz2d_list_editor_clicked_changed(self, *args, **kw):\n _, column = self.viz2d_list_editor_clicked\n self.offline = False\n self.viz2d_list_changed = True\n if self.plot_mode == 'single':\n if column.name == 'visible':\n self.selected_viz2d.visible = True\n self.plot_mode = 'multiple'\n else:\n self.replot()\n elif self.plot_mode == 'multiple':\n if column.name != 'visible':\n self.plot_mode = 'single'\n else:\n self.replot()\n\n plot_mode = Enum('multiple', 'single')\n\n def _plot_mode_changed(self):\n if self.plot_mode == 'single':\n self.replot_selected_viz2d()\n elif self.plot_mode == 'multiple':\n self.replot()\n\n def replot_selected_viz2d(self):\n for viz2d in self.viz2d_list:\n viz2d.visible = False\n self.selected_viz2d.visible = True\n self.n_cols = 1\n self.viz2d_list_changed = True\n self.replot()\n\n def viz2d_list_items_changed(self):\n self.replot()\n\n def get_subrecords(self):\n '''What is this good for?\n '''\n return self.viz2d_list\n\n export_button = Button(label='Export selected diagram')\n\n def plot_in_window(self):\n fig = plt.figure(figsize=(self.fig_width, self.fig_height))\n ax = fig.add_subplot(111)\n self.selected_viz2d.plot(ax, self.vot)\n fig.show()\n\n def _export_button_fired(self, vot=0):\n print('in export button fired')\n Thread(target=self.plot_in_window).start()\n print('thread started')\n\n fig_width = Float(8.0, auto_set=False, enter_set=True)\n fig_height = Float(5.0, auto_set=False, enter_set=True)\n\n save_button = Button(label='Save selected diagram')\n\n animate_button = Button(label='Animate selected diagram')\n\n def _animate_button_fired(self):\n ad = AnimationDialog(sheet=self)\n ad.edit_traits()\n 
return\n #=========================================================================\n # Reference figure serving for orientation.\n #=========================================================================\n reference_viz2d_name = Enum('', values=\"viz2d_names\")\n '''Current name of the reference graphs.\n '''\n\n def _reference_viz2d_name_changed(self):\n self.replot()\n\n reference_viz2d_cumulate = Bool(False, label='cumulate')\n reference_viz2d = Property(Instance(Viz2D),\n depends_on='reference_viz2d_name')\n '''Visualization of a graph showing the time context of the\n current visualization state. \n '''\n\n def _get_reference_viz2d(self):\n if self.reference_viz2d_name == None:\n if len(self.viz2d_dict):\n return self.viz2d_list[0]\n else:\n return None\n return self.viz2d_dict[self.reference_viz2d_name]\n\n reference_figure = Instance(Figure)\n\n def _reference_figure_default(self):\n figure = Figure(facecolor='white')\n figure.set_tight_layout(True)\n return figure\n\n reference_axes = Property(List,\n depends_on='reference_viz2d_name')\n '''Derived axes objects reflecting the layout of plot pane\n and the individual. \n '''\n @cached_property\n def _get_reference_axes(self):\n return self.reference_figure.add_subplot(1, 1, 1)\n\n selected_viz2d = Instance(Viz2D)\n\n def _selected_viz2d_changed(self):\n if self.plot_mode == 'single':\n self.replot_selected_viz2d()\n\n n_cols = Range(low=1, high=3, value=2, label='Number of columns',\n tooltip='Defines a number of columns within the plot pane',\n enter_set=True, auto_set=False)\n\n figure = Instance(Figure)\n\n tight_layout = Bool(True)\n\n def _figure_default(self):\n figure = Figure(facecolor='white')\n figure.set_tight_layout(self.tight_layout)\n return figure\n\n visible_viz2d_list = Property(List,\n depends_on='viz2d_list,viz2d_list_items,n_cols,viz2d_list_changed')\n '''Derived axes objects reflecting the layout of plot pane\n and the individual. \n '''\n @cached_property\n def _get_visible_viz2d_list(self):\n viz_list = []\n for viz2d in self.viz2d_list:\n if viz2d.visible:\n viz_list.append(viz2d)\n return viz_list\n\n pp_list = List(PlotPerspective)\n\n selected_pp = Instance(PlotPerspective)\n\n xaxes = Property(List,\n depends_on='selected_pp')\n '''Derived axes objects reflecting the layout of plot pane\n and the individual. 
\n '''\n @cached_property\n def _get_xaxes(self):\n self.figure.clear()\n if self.selected_pp:\n self.selected_pp.figure = self.figure\n ad = self.selected_pp.axes\n else:\n n_fig = len(self.visible_viz2d_list)\n n_cols = self.n_cols\n n_rows = (n_fig + n_cols - 1) // self.n_cols\n ad = {viz2d: self.figure.add_subplot(n_rows, self.n_cols, i + 1)\n for i, viz2d in enumerate(self.visible_viz2d_list)}\n return ad\n\n data_changed = Event\n\n bgcolor = tr.Tuple(1.0, 1.0, 1.0)\n fgcolor = tr.Tuple(0.0, 0.0, 0.0)\n\n scene = Instance(MlabSceneModel)\n\n def _scene_default(self):\n return MlabSceneModel()\n\n mlab = Property(depends_on='input_change')\n '''Get the mlab handle'''\n\n def _get_mlab(self):\n return self.scene.mlab\n\n fig = Property()\n '''Figure for 3D visualization.\n '''\n @cached_property\n def _get_fig(self):\n fig = self.mlab.gcf()\n bgcolor = tuple(self.bgcolor)\n fgcolor = tuple(self.fgcolor)\n self.mlab.figure(fig, fgcolor=fgcolor, bgcolor=bgcolor)\n return fig\n\n def show(self, *args, **kw):\n '''Render the visualization.\n '''\n self.mlab.show(*args, **kw)\n\n def add_viz3d(self, viz3d, order=1):\n '''Add a new visualization object.'''\n viz3d.ftv = self\n vis3d = viz3d.vis3d\n name = viz3d.name\n label = '%s[%s:%s]-%s' % (name,\n str(vis3d.__class__),\n str(viz3d.__class__),\n vis3d\n )\n if label in self.viz3d_dict:\n raise KeyError('viz3d object named %s already registered' % label)\n viz3d.order = order\n self.viz3d_dict[label] = viz3d\n\n viz3d_dict = tr.Dict(tr.Str, tr.Instance(Viz3D))\n '''Dictionary of visualization objects.\n '''\n\n viz3d_list = tr.Property\n\n def _get_viz3d_list(self):\n map_order_viz3d = {}\n for idx, (viz3d) in enumerate(self.viz3d_dict.values()):\n order = viz3d.order\n map_order_viz3d['%5g%5g' % (order, idx)] = viz3d\n return [map_order_viz3d[key] for key in sorted(map_order_viz3d.keys())]\n\n pipeline_ready = Bool(False)\n\n def setup_pipeline(self):\n if self.pipeline_ready:\n return\n self.fig\n fig = self.mlab.gcf()\n fig.scene.disable_render = True\n for viz3d in self.viz3d_list:\n viz3d.setup()\n fig.scene.disable_render = False\n self.pipeline_ready = True\n\n def update_pipeline(self, vot):\n self.setup_pipeline()\n # get the current constraint information\n self.vot = vot\n fig = self.mlab.gcf()\n fig.scene.disable_render = True\n for viz3d in self.viz3d_list:\n viz3d.plot(vot)\n fig.scene.disable_render = False\n\n selected_viz3d = Instance(Viz3D)\n\n def _selected_viz3d_changed(self):\n print('selection done')\n\n # Traits view definition:\n traits_view = View(\n VSplit(\n HSplit(\n Tabbed(\n UItem('pp_list',\n id='notebook',\n style='custom',\n resizable=True,\n editor=ListEditor(use_notebook=True,\n deletable=False,\n # selected='selected_pp',\n export='DockWindowShell',\n page_name='.name')\n ),\n UItem('scene', label='3d scene',\n editor=SceneEditor(scene_class=MayaviScene)\n ),\n scrollable=True,\n label='Plot panel'\n ),\n VGroup(\n Item('n_cols', width=250),\n Item('plot_mode@', width=250),\n VSplit(\n UItem('viz2d_list@',\n editor=viz2d_list_editor,\n width=100),\n UItem('selected_viz2d@',\n width=200),\n UItem('pp_list@',\n editor=pp_list_editor,\n width=100),\n # UItem('selected_pp@',\n # width=200),\n UItem('viz3d_list@',\n editor=viz3d_list_editor,\n width=100),\n UItem('selected_viz3d@',\n width=200),\n VGroup(\n # UItem('export_button',\n # springy=False, resizable=True),\n # VGroup(\n # HGroup(\n # UItem('fig_width', springy=True,\n # resizable=False),\n # UItem('fig_height', springy=True),\n # ),\n # 
label='Figure size'\n # ),\n UItem('animate_button',\n springy=False, resizable=True),\n ),\n VGroup(\n UItem('reference_viz2d_name', resizable=True),\n UItem('reference_figure', editor=MPLFigureEditor(),\n width=200,\n # springy=True\n ),\n label='Reference graph',\n )\n ),\n label='Plot configure',\n scrollable=True\n ),\n ),\n VGroup(\n HGroup(\n Item('mode', resizable=False, springy=False),\n Item('monitor_chunk_size', resizable=False, springy=False),\n ),\n Item('vot_slider', height=40),\n )\n ),\n resizable=True,\n width=0.8, height=0.8,\n buttons=['OK', 'Cancel']\n )\n\n\nif __name__ == '__main__':\n viz3d_1 = Viz3D(label='first')\n viz3d_2 = Viz3D(label='second')\n vs = BMCSVizSheet()\n vs.add_viz3d(viz3d_1)\n vs.add_viz3d(viz3d_2)\n vs.run_started()\n vs.replot()\n vs.run_finished()\n vs.configure_traits()\n","repo_name":"simvisage/bmcs","sub_path":"view/window/bmcs_viz_sheet.py","file_name":"bmcs_viz_sheet.py","file_ext":"py","file_size_in_byte":24629,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"27843674402","text":"\"\"\"\r\n205. Isomorphic Strings\r\n\r\nGiven two strings s and t, determine if they are isomorphic.\r\nTwo strings s and t are isomorphic if the characters in s can be replaced to get t.\r\nAll occurrences of a character must be replaced with another character while preserving the order of characters. No two characters may map to the same character, but a character may map to itself.\r\n\r\nExample 1:\r\n\r\nInput: s = \"egg\", t = \"add\"\r\nOutput: true\r\nExample 2:\r\n\r\nInput: s = \"foo\", t = \"bar\"\r\nOutput: false\r\nExample 3:\r\n\r\nInput: s = \"paper\", t = \"title\"\r\nOutput: true\r\n\"\"\"\r\n\r\n\r\ndef is_isomorphic(s, t):\r\n sozluk = {}\r\n used = set() # characters of t that already have a preimage in s\r\n for i in range(len(s)):\r\n if s[i] not in sozluk:\r\n # no two characters of s may map to the same character of t\r\n if t[i] in used:\r\n return False\r\n sozluk[s[i]] = t[i]\r\n used.add(t[i])\r\n else:\r\n if t[i] != sozluk[s[i]]:\r\n return False\r\n return True\r\n\r\n\r\ndef main():\r\n cases = [\r\n {\r\n \"s\": \"egg\",\r\n \"t\": \"add\",\r\n \"answer\": True\r\n },\r\n {\r\n \"s\": \"foo\",\r\n \"t\": \"bar\",\r\n \"answer\": False\r\n },\r\n {\r\n \"s\": \"paper\",\r\n \"t\": \"title\",\r\n \"answer\": True\r\n }\r\n ]\r\n for case in cases:\r\n assert is_isomorphic(case['s'], case[\"t\"]) == case[\"answer\"]\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n\r\n","repo_name":"Luvishable/LEETCODE-SOLUTIONS","sub_path":"isomorphic_strings.py","file_name":"isomorphic_strings.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3222766240","text":"import torch\nfrom torch import nn\nimport math\n\n\nclass PositionalEncoding(nn.Module):\n \"\"\"\n Positional encoding module for NeRF.\n \"\"\"\n\n def __init__(self, dim: int, num_freqs: int):\n super(PositionalEncoding, self).__init__()\n self.dim = dim\n self.num_freqs = num_freqs\n self.freqs = 2.0 ** torch.arange(num_freqs)\n\n def forward(self, x):\n # Expand dimensions\n x = x.unsqueeze(-2)\n\n # Scale input by frequencies\n scaled_x = x * self.freqs.view(1, 1, -1)\n\n # Apply sin and cos and concatenate along the last axis\n x = torch.cat([torch.sin(scaled_x), torch.cos(scaled_x)], dim=-1)\n\n # Flatten last two dimensions\n x = x.view(*x.shape[:-2], -1)\n\n return x\n\n\ndef hash_encoder(coords, log2_hashmap_size) -> torch.Tensor:\n \"\"\"\n Hashes the coordinates to a hashmap of size 2^log2_hashmap_size\n :param coords: coordinates of each point in space. This function can process up to 7 dim
This function can process up to 7 dim\n coordinates\n :param log2_hashmap_size: log2 of the size of the hashmap\n :return: the hashed coordinates as a tensor\n \"\"\"\n primes = [1, 2654435761, 805459861, 3674653429, 2097192037, 1434869437, 2165219737]\n\n xor_result = torch.zeros_like(coords)[..., 0]\n for i in range(coords.shape[-1]):\n xor_result ^= coords[..., i] * primes[i]\n\n return torch.tensor((1 << log2_hashmap_size) - 1).to(xor_result.device) & xor_result\n\n\nclass HashEncoder(nn.Module):\n \"\"\"\n Hash encoder module for NeRF.\n \"\"\"\n\n def __init__(self, bounding_box=(torch.tensor([-1., -1., -1.]), torch.tensor([1., 1., 1.])),\n n_levels=16, n_features_per_level=2, log2_hashmap_size=19, base_resolution=16,\n finest_resolution=512):\n \"\"\"\n :param bounding_box: the bounding box of the scene\n :param n_levels: the number of leves of the hash encoder\n :param n_features_per_level: number of features per level\n :param log2_hashmap_size: the log2 of the size of the hashmap\n :param base_resolution: the base (minimum) resolution of the hash encoder\n :param finest_resolution: the finest (maximum) resolution of the hash encoder\n \"\"\"\n super(HashEncoder, self).__init__()\n assert n_levels > 0\n assert n_features_per_level > 0\n assert log2_hashmap_size > 0\n assert base_resolution > 0\n assert finest_resolution > 0\n assert finest_resolution >= base_resolution\n self.bounding_box = bounding_box\n self.n_levels = n_levels\n self.n_features_per_level = n_features_per_level\n self.log2_hashmap_size = log2_hashmap_size\n self.base_resolution = torch.tensor(base_resolution)\n self.finest_resolution = torch.tensor(finest_resolution)\n self.out_dim = self.n_levels * self.n_features_per_level\n\n # grow factor\n self.b = torch.exp((torch.log(self.finest_resolution) - torch.log(self.base_resolution)) /\n (n_levels - 1))\n\n # initialize embeddings\n embed_list = []\n for _ in range(n_levels):\n embed = nn.Embedding(2 ** self.log2_hashmap_size, self.n_features_per_level)\n nn.init.uniform_(embed.weight, a=-0.0001, b=0.0001)\n embed_list.append(embed)\n self.embeddings = nn.ModuleList(embed_list)\n\n self.box_offsets = torch.tensor(\n [[[i, j, k] for i in [0, 1] for j in [0, 1] for k in [0, 1]]])\n\n box_min, box_max = bounding_box\n resolutions = [math.floor(self.base_resolution * self.b ** i) for i in range(n_levels)]\n self.resolutions = torch.tensor(resolutions)\n self.grid_sizes = torch.cat([(box_max - box_min) / res for res in resolutions], -1)\n\n def get_voxel_vertices(self,\n xyz: torch.Tensor,\n level: int,\n log2_hashmap_size: torch.Tensor) \\\n -> tuple[: torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"\n Gets the vertices of the voxels for the given coordinates and level.\n :param xyz: the 3D coordinates of the points, usually bach size x 3\n :param level: the level of the hash encoder\n :param log2_hashmap_size: the log2 of the size of the hashmap\n :return: a tuple of the min and max vertices of the voxels and the hashed voxel indices\n \"\"\"\n box_min, box_max = self.bounding_box\n\n # clip the points outside the bounding box\n xyz = torch.clamp(xyz, min=box_min, max=box_max)\n\n grid_size = self.grid_sizes[level]\n\n bottom_left_idx = torch.floor((xyz - box_min) / grid_size).int()\n voxel_min_vertex = bottom_left_idx * grid_size + box_min\n ones = torch.tensor([1.0, 1.0, 1.0]).to(xyz.device)\n voxel_max_vertex = voxel_min_vertex + ones * grid_size\n\n voxel_indices = bottom_left_idx.unsqueeze(1) + self.box_offsets\n hashed_voxel_indices = 
hash_encoder(voxel_indices, log2_hashmap_size)\n\n return voxel_min_vertex, voxel_max_vertex, hashed_voxel_indices\n\n def trilinear_interp(self,\n xyz: torch.Tensor,\n voxel_min_vertex: torch.Tensor,\n voxel_max_vertex: torch.Tensor,\n voxel_embedds: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Trilinear interpolation of the given coordinates.\n :param xyz: the 3D coordinates of the points, usually bach size x 3\n :param voxel_min_vertex: minimum vertex of the voxels\n :param voxel_max_vertex: maximum vertex of the voxels\n :param voxel_embedds: embeddings of the voxels\n :return: the interpolated embeddings as a tensor\n \"\"\"\n\n # source: https://en.wikipedia.org/wiki/Trilinear_interpolation\n weights = (xyz - voxel_min_vertex) / (voxel_max_vertex - voxel_min_vertex) # B x 3\n\n # step 1\n # 0->000, 1->001, 2->010, 3->011, 4->100, 5->101, 6->110, 7->111\n c00 = voxel_embedds[:, 0] * (1 - weights[:, 0][:, None]) + \\\n voxel_embedds[:, 4] * weights[:, 0][:, None]\n c01 = voxel_embedds[:, 1] * (1 - weights[:, 0][:, None]) + \\\n voxel_embedds[:, 5] * weights[:, 0][:, None]\n c10 = voxel_embedds[:, 2] * (1 - weights[:, 0][:, None]) + \\\n voxel_embedds[:, 6] * weights[:, 0][:, None]\n c11 = voxel_embedds[:, 3] * (1 - weights[:, 0][:, None]) + \\\n voxel_embedds[:, 7] * weights[:, 0][:, None]\n\n # step 2\n c0 = c00 * (1 - weights[:, 1][:, None]) + c10 * weights[:, 1][:, None]\n c1 = c01 * (1 - weights[:, 1][:, None]) + c11 * weights[:, 1][:, None]\n\n # step 3\n c = c0 * (1 - weights[:, 2][:, None]) + c1 * weights[:, 2][:, None]\n\n return c\n\n def forward(self, xyz: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Forward pass of the hash encoder.\n :param xyz: the 3D coordinates of the points, usually bach size x 3\n :return: a tuple of the embedded coordinates and a mask of the points that are on the\n surface\n \"\"\"\n x_embedded_all = []\n if xyz.device != self.resolutions.device:\n self.bounding_box = (self.bounding_box[0].to(xyz.device),\n self.bounding_box[1].to(xyz.device))\n self.box_offsets = self.box_offsets.to(xyz.device)\n self.grid_sizes = self.grid_sizes.to(xyz.device)\n self.resolutions = self.resolutions.to(xyz.device)\n\n for level in range(self.n_levels):\n voxel_min_vertex, voxel_max_vertex, hashed_voxel_indices = \\\n self.get_voxel_vertices(xyz, level, self.log2_hashmap_size)\n\n voxel_embedds = self.embeddings[level](hashed_voxel_indices)\n\n x_embedded = self.trilinear_interp(xyz, voxel_min_vertex, voxel_max_vertex,\n voxel_embedds)\n x_embedded_all.append(x_embedded)\n\n with torch.no_grad():\n box_min, box_max = self.bounding_box\n keep_mask = xyz == torch.max(torch.min(xyz, box_max), box_min)\n keep_mask = keep_mask.sum(dim=-1) == keep_mask.shape[-1]\n return torch.cat(x_embedded_all, dim=-1), keep_mask\n\n\nclass SphericalEncoder(nn.Module):\n \"\"\"\n Spherical encoder module for NeRF. 
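It evaluates the first degree bands of real spherical harmonics for a unit direction,\n    so, for example, SphericalEncoder(degree=4) maps directions of shape (N, 3) to (N, 16).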
This is used to create the embeddings of the rays directions.\n \"\"\"\n\n def __init__(self, input_dim: int = 3, degree: int = 4):\n \"\"\"\n :param input_dim: the dimension of the input\n :param degree: the degree of the spherical harmonics\n \"\"\"\n\n super().__init__()\n\n self.input_dim = input_dim\n self.degree = degree\n\n assert self.input_dim == 3\n assert self.degree >= 1 and self.degree <= 5\n\n self.out_dim = degree ** 2\n\n self.C0 = 0.28209479177387814\n self.C1 = 0.4886025119029199\n self.C2 = [\n 1.0925484305920792,\n -1.0925484305920792,\n 0.31539156525252005,\n -1.0925484305920792,\n 0.5462742152960396\n ]\n self.C3 = [\n -0.5900435899266435,\n 2.890611442640554,\n -0.4570457994644658,\n 0.3731763325901154,\n -0.4570457994644658,\n 1.445305721320277,\n -0.5900435899266435\n ]\n self.C4 = [\n 2.5033429417967046,\n -1.7701307697799304,\n 0.9461746957575601,\n -0.6690465435572892,\n 0.10578554691520431,\n -0.6690465435572892,\n 0.47308734787878004,\n -1.7701307697799304,\n 0.6258357354491761\n ]\n\n def forward(self, input: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Forward pass of the spherical encoder.\n :param input: usually a 3D direction\n :return: the spherical harmonics of the input\n \"\"\"\n\n result = torch.empty((*input.shape[:-1], self.out_dim), dtype=input.dtype,\n device=input.device)\n x, y, z = input.unbind(-1)\n\n result[..., 0] = self.C0\n if self.degree > 1:\n result[..., 1] = -self.C1 * y\n result[..., 2] = self.C1 * z\n result[..., 3] = -self.C1 * x\n if self.degree > 2:\n xx, yy, zz = x * x, y * y, z * z\n xy, yz, xz = x * y, y * z, x * z\n result[..., 4] = self.C2[0] * xy\n result[..., 5] = self.C2[1] * yz\n result[..., 6] = self.C2[2] * (2.0 * zz - xx - yy)\n # result[..., 6] = self.C2[2] * (3.0 * zz - 1) # xx + yy + zz == 1, but this will lead to different backward gradients, interesting...\n result[..., 7] = self.C2[3] * xz\n result[..., 8] = self.C2[4] * (xx - yy)\n if self.degree > 3:\n result[..., 9] = self.C3[0] * y * (3 * xx - yy)\n result[..., 10] = self.C3[1] * xy * z\n result[..., 11] = self.C3[2] * y * (4 * zz - xx - yy)\n result[..., 12] = self.C3[3] * z * (2 * zz - 3 * xx - 3 * yy)\n result[..., 13] = self.C3[4] * x * (4 * zz - xx - yy)\n result[..., 14] = self.C3[5] * z * (xx - yy)\n result[..., 15] = self.C3[6] * x * (xx - 3 * yy)\n if self.degree > 4:\n result[..., 16] = self.C4[0] * xy * (xx - yy)\n result[..., 17] = self.C4[1] * yz * (3 * xx - yy)\n result[..., 18] = self.C4[2] * xy * (7 * zz - 1)\n result[..., 19] = self.C4[3] * yz * (7 * zz - 3)\n result[..., 20] = self.C4[4] * (zz * (35 * zz - 30) + 3)\n result[..., 21] = self.C4[5] * xz * (7 * zz - 3)\n result[..., 22] = self.C4[6] * (xx - yy) * (7 * zz - 1)\n result[..., 23] = self.C4[7] * xz * (xx - 3 * yy)\n result[..., 24] = self.C4[8] * (xx * (xx - 3 * yy) - yy * (3 * xx - yy))\n\n return result\n","repo_name":"jorgemf/NeRF","sub_path":"nerf/encoders.py","file_name":"encoders.py","file_ext":"py","file_size_in_byte":12117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1957339035","text":"import cv2\nimport numpy as np\n\nclass GetCoordinates:\n\n # Keys to exit or reset drawing\n EXIT_GUI = ord(\"e\")\n RESET_GUI = ord(\"r\")\n\n # output: the output file contains coordinates of drawn boxes\n # caption: a title of the gui window\n # image: a path to the image file\n # click_counter: To count the number of mouse clicks while drawing a box. 
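One click marks one corner.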
\n # Maximum is 4 to draw one box.\n # space_id: To label each drawn box with a number\n # coordinates: An array to store coordinates of one box.\n def __init__(self, image, output):\n self.image = cv2.imread(image).copy()\n self.output = output\n self.caption = image\n self.click_counter = 0\n self.space_id = 0\n self.coordinates = []\n\n # Create a window to display the image and bind the callback function to window\n cv2.namedWindow(self.caption, cv2.WINDOW_GUI_EXPANDED)\n cv2.setMouseCallback(self.caption, self.__drawing_a_box)\n\n # source: https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_gui/py_mouse_handling/py_mouse_handling.html\n # Mouse call back function\n def __drawing_a_box(self, event, x, y, flags, params):\n # on left mousse click event\n if event == cv2.EVENT_LBUTTONDOWN:\n # add this point to coordinates array\n self.coordinates.append((x, y))\n # count the click\n self.click_counter += 1\n # One box is done drawing = 4 clicks\n if self.click_counter >= 4:\n self.__drawn_is_done_event()\n # User still making more points to make a box\n elif self.click_counter > 1:\n self.__is_drawing_event()\n\n cv2.imshow(self.caption, self.image)\n\n\n # draw a line by connecting two points while user is making points\n # source: https://docs.opencv.org/3.0-beta/modules/imgproc/doc/drawing_functions.html?highlight=line#cv2.line\n def __is_drawing_event(self):\n cv2.line(self.image, self.coordinates[-2], self.coordinates[-1], (255, 0, 0), 4)\n\n # connects remaining lines when 4 points are created\n def __drawn_is_done_event(self):\n cv2.line(self.image,\n self.coordinates[2],\n self.coordinates[3],\n (255, 0, 0),\n 4)\n cv2.line(self.image,\n self.coordinates[3],\n self.coordinates[0],\n (255, 0, 0),\n 4)\n\n # Write the coordinates of this parking space with its label id to the output\n self.output.write(\"- space_id: \" + str(self.space_id) + \"\\n points:\\n\" +\n \" - [\" + str(self.coordinates[0][0]) + \", \" + str(self.coordinates[0][1]) + \"]\\n\" +\n \" - [\" + str(self.coordinates[1][0]) + \", \" + str(self.coordinates[1][1]) + \"]\\n\" +\n \" - [\" + str(self.coordinates[2][0]) + \", \" + str(self.coordinates[2][1]) + \"]\\n\" +\n \" - [\" + str(self.coordinates[3][0]) + \", \" + str(self.coordinates[3][1]) + \"]\\n\")\n print('after writing outut')\n print(self.output)\n # draw the final contours outlines using drawContours functions\n contours = np.array(self.coordinates)\n self.__draw_contours(self.image, contours, str(self.space_id + 1))\n\n # reset the coordinates array\n for i in range(0, 4):\n self.coordinates.pop()\n\n # increment space_id\n self.space_id += 1\n # reset click counter\n self.click_counter = 0\n\n x, y = [], []\n index = 0\n for contour_line in contours:\n for contour in contour_line:\n if index % 2 == 0:\n x.append(contour)\n else:\n y.append(contour)\n index+=1\n # x.append(contour[0][0])\n # y.append(contour[0][1])\n\n x1, x2, y1, y2 = min(x), max(x), min(y), max(y)\n\n # print('[y1 = %f, y2 = %f, x1 = %f, x2=%f' %(y1, y2, x1, x2))\n # cropped = self.image[y1:y2, x1:x2]\n # cv2.imshow(\"crop\" + str(self.space_id), cropped)\n # cv2.imwrite(\"saved_crop/crop\" + str(self.space_id)+\".jpg\", cropped)\n\n\n\n def __draw_contours(self, image, contours, space_id):\n\n # Refer the parameters meaning in the documentation\n # source: https://docs.opencv.org/3.0-beta/modules/imgproc/doc/drawing_functions.html?highlight=line#cv2.line\n cv2.drawContours(image,\n [contours],\n contourIdx=-1,\n color=(255, 0, 0),\n thickness=1,\n lineType=cv2.LINE_8)\n \n # 
Fun math to compute center of the contours\n # source: https://www.pyimagesearch.com/2016/02/01/opencv-center-of-contour/\n get_moments = cv2.moments(contours)\n center_position = (int(get_moments[\"m10\"] / get_moments[\"m00\"]) - 3,\n int(get_moments[\"m01\"] / get_moments[\"m00\"]) + 3)\n\n #Put the space id in the center of the box\n #source: https://docs.opencv.org/3.0-beta/modules/imgproc/doc/drawing_functions.html?highlight=line#cv2.line\n cv2.putText(image,\n space_id, # text to be drawn\n center_position, # position\n cv2.FONT_HERSHEY_SIMPLEX, # font\n 0.5, # fontScale\n (0, 255, 0), # color\n 2, # thickness\n cv2.LINE_AA) # line type\n\n\n # Listen to a matching key to quit or reset\n def finish_segmenting(self):\n while True:\n cv2.imshow(self.caption, self.image)\n key = cv2.waitKey(0)\n\n if key == GetCoordinates.RESET_GUI:\n self.image = self.image.copy()\n elif key == GetCoordinates.EXIT_GUI:\n break\n cv2.destroyWindow(self.caption)","repo_name":"vunguyen7797/SmartParkingLot-Classifier","sub_path":"opencv_crop_image/get_coordinates.py","file_name":"get_coordinates.py","file_ext":"py","file_size_in_byte":6037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16344949392","text":"from __future__ import annotations\n\nfrom emmet.core.mpid import MPculeID\nfrom emmet.core.qchem.molecule import MoleculeDoc\nfrom emmet.core.settings import EmmetSettings\nfrom pymatgen.core.structure import Molecule\n\nfrom mp_api.client.core import BaseRester, MPRestError\nfrom mp_api.client.core.utils import validate_ids\n\n_EMMET_SETTINGS = EmmetSettings()\n\n\nclass BaseMoleculeRester(BaseRester[MoleculeDoc]):\n document_model = MoleculeDoc\n primary_key = \"molecule_id\"\n\n def get_molecule_by_mpculeid(\n self, mpcule_id: str, final: bool = True\n ) -> Molecule | list[Molecule]:\n \"\"\"Get a molecule object for a given Materials Project molecules ID (MPculeID).\n\n Arguments:\n mpcule_id (str): Materials project molecule ID\n final (bool): Whether to get the final (optimized) molecule, or the list of initial\n (pre-optimized) structures. Defaults to True.\n\n Returns:\n molecule (Union[Molecule, List[Molecule]]): Pymatgen Molecule object or list of\n pymatgen Molecule objects.\n \"\"\"\n if final:\n response = self.get_data_by_id(mpcule_id, fields=[\"molecule\"])\n return response.molecule if response is not None else response # type: ignore\n else:\n response = self.get_data_by_id(mpcule_id, fields=[\"initial_molecules\"])\n return response.initial_molecules if response is not None else response # type: ignore\n\n def find_molecule(\n self,\n filename_or_molecule: str | Molecule,\n charge: int | None = None,\n spin_multiplicity: int | None = None,\n tolerance: float = 0.01,\n allow_multiple_results: bool = False,\n ) -> list[str] | str:\n \"\"\"Finds matching molecules from the Materials Project molecules database (MPcules).\n\n Multiple results may be returned of \"similar\" molecules based on\n distance using the pymatgen MoleculeMatcher algorithm.\n\n Args:\n filename_or_molecule: filename or Molecule object\n charge: Molecule charge. Default is None, meaning that the charge will not be used to\n restrict the output.\n spin_multiplicity: Molecule's spin multiplicity. 
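For example, 1 denotes a closed-shell singlet and 2 a doublet.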
Default is None, meaning that the output will\n not be restricted by spin multiplicity.\n tolerance: RMSD difference threshold for MoleculeMatcher\n allow_multiple_results: changes return type for either\n a single mpcule_id or list of mpcule_ids\n Returns:\n A matching mpcule_id if one is found or list of results if allow_multiple_results\n is True\n Raises:\n MPRestError\n \"\"\"\n if isinstance(filename_or_molecule, str):\n m = Molecule.from_file(filename_or_molecule)\n elif isinstance(filename_or_molecule, Molecule):\n m = filename_or_molecule\n else:\n raise MPRestError(\"Provide filename or Structure object.\")\n\n results = self._post_resource(\n body=m.as_dict(),\n params={\n \"tolerance\": tolerance,\n \"charge\": charge,\n \"spin_multiplicity\": spin_multiplicity,\n },\n suburl=\"find_molecule\",\n use_document_model=False,\n ).get(\"data\")\n\n if len(results) > 1: # type: ignore\n if not allow_multiple_results:\n raise ValueError(\n \"Multiple matches found for this combination of tolerances, but \"\n \"`allow_multiple_results` set to False.\"\n )\n return results # type: ignore\n\n if results:\n return results[0][\"molecule_id\"]\n else:\n return []\n\n def search(\n self,\n charge: tuple[int, int] | None = None,\n spin_multiplicity: tuple[int, int] | None = None,\n nelements: tuple[int, int] | None = None,\n chemsys: str | list[str] | None = None,\n deprecated: bool | None = None,\n elements: list[str] | None = None,\n exclude_elements: list[str] | None = None,\n formula: str | list[str] | None = None,\n molecule_ids: MPculeID | list[MPculeID] | None = None,\n task_ids: str | list[str] | None = None,\n num_chunks: int | None = None,\n chunk_size: int = 1000,\n all_fields: bool = True,\n fields: list[str] | None = None,\n ):\n \"\"\"Query molecule docs using a variety of search criteria.\n\n Arguments:\n charge (Tuple[int, int]): Minimum and maximum charge for the molecule.\n spin_multiplicity (Tuple[int, int]): Minimum and maximum spin for the molecule.\n nelements (Tuple[int, int]): Minimum and maximum number of elements\n chemsys (str, List[str]): A chemical system, list of chemical systems\n (e.g., Li-C-O, [C-O-H-N, Li-N]), or single formula (e.g., C2 H4).\n deprecated (bool): Whether the material is tagged as deprecated.\n elements (List[str]): A list of elements.\n exclude_elements (List(str)): List of elements to exclude.\n formula (str, List[str]): An alphabetical formula or list of formulas\n (e.g. \"C2 Li2 O4\", [\"C2 H4\", \"C2 H6\"]).\n molecule_ids (MPculeID, List[MPculeID]): List of Materials Project Molecule IDs (MPculeIDs) to return data\n for.\n task_ids (str, List[str]): List of Materials Project IDs to return data for.\n num_chunks (int): Maximum number of chunks of data to yield. None will yield all possible.\n chunk_size (int): Number of data entries per chunk.\n all_fields (bool): Whether to return all fields in the document. 
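Setting this to False together with fields keeps responses small.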
Defaults to True.\n fields (List[str]): List of fields in MoleculeDoc to return data for.\n Default is molecule_id, last_updated, and formula_alphabetical if all_fields is False.\n\n Returns:\n ([MoleculeDoc]) List of molecules documents\n \"\"\"\n query_params = {\"deprecated\": deprecated} # type: dict\n\n if molecule_ids:\n if isinstance(molecule_ids, str):\n molecule_ids = [molecule_ids]\n\n query_params.update({\"molecule_ids\": \",\".join(molecule_ids)})\n\n if charge:\n query_params.update({\"charge\": charge})\n\n if spin_multiplicity:\n query_params.update({\"spin_multiplicity\": spin_multiplicity})\n\n if formula:\n if isinstance(formula, str):\n formula = [formula]\n\n query_params.update({\"formula\": \",\".join(formula)})\n\n if chemsys:\n if isinstance(chemsys, str):\n chemsys = [chemsys]\n\n query_params.update({\"chemsys\": \",\".join(chemsys)})\n\n if elements:\n query_params.update({\"elements\": \",\".join(elements)})\n\n if exclude_elements:\n query_params.update({\"exclude_elements\": \",\".join(exclude_elements)})\n\n if task_ids:\n if isinstance(task_ids, str):\n task_ids = [task_ids]\n\n query_params.update({\"task_ids\": \",\".join(validate_ids(task_ids))})\n\n query_params = {\n entry: query_params[entry]\n for entry in query_params\n if query_params[entry] is not None\n }\n\n return super()._search(\n num_chunks=num_chunks,\n chunk_size=chunk_size,\n all_fields=all_fields,\n fields=fields,\n **query_params,\n )\n\n\nclass AssociatedMoleculeRester(BaseMoleculeRester):\n suffix = \"molecules/assoc\"\n\n\nclass MoleculeRester(BaseMoleculeRester):\n suffix = \"molecules/core\"\n","repo_name":"materialsproject/api","sub_path":"mp_api/client/routes/molecules/molecules.py","file_name":"molecules.py","file_ext":"py","file_size_in_byte":7716,"program_lang":"python","lang":"en","doc_type":"code","stars":80,"dataset":"github-code","pt":"37"} +{"seq_id":"40124277700","text":"from typing import List\r\n\r\n\r\nclass Solution:\r\n def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:\r\n nums1.extend(nums2)\r\n nums1.sort()\r\n even = len(nums1) % 2\r\n mid = len(nums1) // 2\r\n if even != 0:\r\n return nums1[mid]\r\n else:\r\n return (nums1[mid - 1] + nums1[mid]) / 2\r\n\r\n\r\nif __name__ == '__main__':\r\n sol = Solution()","repo_name":"pangyouzhen/data-structure","sub_path":"other/4 findMedianSortedArrays.py","file_name":"4 findMedianSortedArrays.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2828533196","text":"from nltk.corpus import stopwords\nfrom nlp_id.lemmatizer import Lemmatizer\nfrom nltk.tokenize import word_tokenize\n\nimport string\n\nblacklist = ['nim', 'fakultas', 'ugm', 'nama', 'prodi', 'studi', 'tk', 'teknik', 'elektro', 'informatika', 'biomedis', 'te', 'ft', 'ti', 'tif', 'tb', 'tetitb', 'tugas', 'quiz', 'kuis']\n\ndef tokenPDF(ePDF,fn):\n #Case Folding\n ePDF = ePDF.lower()\n fn = fn.lower()\n\n #Tokenize\n fn = fn.strip()\n fn = fn.translate(str.maketrans(\"\",\"\",'1234567890'))\n fn = fn.translate(str.maketrans(\"\",\"\", string.punctuation))\n\n ePDF = ePDF.strip()\n ePDF = ePDF.translate(str.maketrans(\"\",\"\",'1234567890'))\n ePDF = ePDF.translate(str.maketrans(string.punctuation, ' '*len(string.punctuation)))\n\n #Remove stopwords\n stop_words = set(stopwords.words('indonesian'))\n tokens = word_tokenize(ePDF)\n ePDF = [i for i in tokens if not i in stop_words]\n\n #Remove name\n ePDF = [i for i in ePDF if not i in 
fn]\n\n    #Remove trash from this world\n    ePDF = [i for i in ePDF if not i in blacklist]\n\n    #Remove dupe clause\n    #ePDF = list(dict.fromkeys(ePDF))\n\n    return(ePDF)\n\ndef ligma(ePDF):\n    lemma = Lemmatizer()\n    ePDF = [lemma.lemmatize(str(token)) for token in ePDF]\n    return(ePDF)\n    \n\n","repo_name":"ignatiusaas/textpreprocess","sub_path":"tokenPDF.py","file_name":"tokenPDF.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4623072548","text":"import numpy as np\nimport importlib\n# import matplotlib.pyplot as plt\n\n__all__ = [\"imshow\", \"batch_show\"]\n\n\ndef imshow(img, bands=(3, 2, 1), multiply=1, scale=1, method='plt', cmap='gray', ret_fig=False, show=True, vrange_mode='auto', channel_first=None):\n    \"\"\" Show an image of various common types and bit depths.\n    This function supports numpy.ndarray and torch.Tensor as input.\n    The channel dimension can be the first or last.\n    If it has more than 3 dimensions, the first element is taken\n    along each of the leading dimensions.\n    Args:\n        img: Input image. numpy.ndarray and torch.Tensor types supported.\n        bands: If the channel dimension has more than 3 bands, use this tuple to select.\n        multiply: A factor multiplied into the whole image. Useful when the image is dark.\n        scale: A size scale factor. Use this when the input image's size is much too small/big.\n        method: The plot method. Two choices: ('plt'|'pil') For some reason, VS Code Jupyter \n            may not work using matplotlib.pyplot, so you can use 'pil' instead. \n        cmap: When the image has only two dimensions, or only one band is selected, the cmap used by\n            matplotlib.pyplot. Default is gray.\n        ret_fig: Whether to return the processed input image.\n        vrange_mode: When the input image is monochrome, whether to use a cmap value range of auto min-max,\n            or a fixed range from 0 to 255. 
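With 'auto' the display range is stretched to the data's min and max, while 'fixed'\n            assumes 8-bit data.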
Select from ('auto', 'fixed').\n \"\"\"\n\n assert vrange_mode in ('auto', 'fixed')\n\n if not show:\n ret_fig = True\n res = None\n\n if method not in ('plt', 'pil'):\n print(\"Please choose \\'plt\\' or \\'pil\\' for method.\")\n return None\n\n def show_img(image, method, cmap):\n if method.lower() == 'plt':\n plt = importlib.import_module('matplotlib.pyplot')\n plt.figure()\n if len(image.shape) == 2:\n if vrange_mode == 'auto':\n plt.imshow(image, cmap=cmap) # Default: \"viridis\"\n else:\n plt.imshow(image, cmap=cmap, vmin=0, vmax=255)\n else:\n plt.imshow(image)\n return None\n elif method.lower() == 'pil':\n Image = importlib.import_module('PIL.Image')\n return Image.fromarray(image)\n\n def rescale(image, multiply, scale):\n imax = image.max()\n if imax <= 1:\n image = (image*255)\n elif imax > 255:\n image = ((image+1)/256)\n else:\n image = image\n image = np.clip(image*multiply, 0, 255).astype(np.uint8)\n # image = np.clip(image, 0, 255)\n if scale <= 0:\n raise ValueError(\"scale should be bigger than 0!\")\n elif scale != 1:\n cv2 = importlib.import_module('cv2')\n image = cv2.resize(image, dsize=None, fx=scale,\n fy=scale, interpolation=cv2.INTER_CUBIC)\n return image\n\n if not isinstance(img, np.ndarray):\n try:\n torch = importlib.import_module('torch')\n if isinstance(img, torch.Tensor):\n img = img.cpu().detach().numpy()\n except:\n raise ImportError(\"Pytorch not installed!\")\n\n while len(img.shape) > 3:\n img = img[0]\n\n shp = img.shape\n\n if len(shp) < 2:\n print(\"Invalid Input Type!\")\n elif len(shp) == 2:\n img = rescale(img, multiply, scale)\n if show:\n res = show_img(img, method, cmap)\n else:\n if (shp[0] < shp[-1] and channel_first is None) or channel_first:\n img = img.transpose(1, 2, 0)\n shp = img.shape\n if shp[-1] > 3:\n img = img[:, :, bands]\n elif bands != (3,2,1):\n img = img[:, :, bands]\n img = rescale(img, multiply, scale)\n if show:\n res = show_img(img, method, cmap)\n if res is not None:\n return res\n if ret_fig:\n return img\n\n\ndef batch_show(imgs, sub_titles=None, title=None, row_labels=None, \n col_labels=None, cmap='gray', vrange_mode='fixed', \n ret_fig=False, font_size=(20,20,20), \n font_type='Times New Roman', sub_size=(3,3)):\n \"\"\" Show images. \n Args:\n imgs: Supposed to be an 2-d list or tuple. Each element is an image in numpy.ndarray format.\n sub_titles: Titles of each subplot.\n title: The image overall title.\n cmap: When the image only has two dimension, or only select one band, the cmap used by\n matplotlib.pyplot. Default is gray.\n vrange_mode: When the input image is monochrome, whether use a cmap value range auto min-max,\n or use a fixed range from 0 to 255. Select from ('auto', 'fixed').\n ret_fig: Whether return the processed input image.\n font_size: tuple/list/int/float, the font sizes of row, column, and subtitle. 
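The three entries are applied to the row labels, column labels and subtitles, in that order.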
If input type is\n int/float, set all font sizes the same.\n font_type: str, the font name of your desired font type.\n \"\"\"\n if not (isinstance(imgs[0], list) or isinstance(imgs[0], tuple)):\n imgs = [imgs]\n if not (isinstance(font_size, list) or isinstance(font_size, tuple)):\n font_size = (font_size, font_size, font_size)\n rows = len(imgs)\n cols = max([len(i) for i in imgs])\n\n plt = importlib.import_module('matplotlib.pyplot')\n # plt.figure()\n fig, axs = plt.subplots(rows, cols, figsize=(sub_size[0]*cols, sub_size[1]*rows), sharey=True)\n if rows == 1:\n axs = [axs]\n if cols == 1:\n axs = [[i] for i in axs]\n axs = np.array(axs)\n for i in range(len(imgs)):\n for j in range(len(imgs[i])):\n img = imgs[i][j]\n if sub_titles is not None and len(sub_titles) > i and len(sub_titles[i]) > j:\n sub_title = sub_titles[i][j]\n else:\n sub_title = ''\n if len(img.shape) == 2 or img.shape[0] == 1 or img.shape[-1] == 1:\n if vrange_mode == 'fixed':\n axs[i, j].imshow(img, cmap=cmap, vmin=0, vmax=255)\n else:\n axs[i, j].imshow(img, cmap=cmap)\n else:\n axs[i, j].imshow(img)\n axs[i, j].set(xticks=[], yticks=[])\n if row_labels is not None and len(row_labels) > i:\n axs[i, j].set_ylabel(row_labels[i], fontsize=font_size[0], fontname=font_type)\n if col_labels is not None and len(col_labels) > j:\n axs[i, j].set_xlabel(col_labels[j], fontsize=font_size[1], fontname=font_type)\n if sub_title != '':\n axs[i, j].set_title(sub_title, fontsize=font_size[2], y=-0.15, fontname=font_type)\n\n for ax in axs.flat:\n ax.label_outer()\n\n if title is not None:\n fig.suptitle(title, fontsize=30)\n plt.tight_layout()\n\n if ret_fig:\n return fig\n","repo_name":"miracleyoo/mlib","sub_path":"cv/image/imshow.py","file_name":"imshow.py","file_ext":"py","file_size_in_byte":6880,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"1016675625","text":"import slack,time,io\n\nfrom contextlib import redirect_stdout,redirect_stderr\n\nclass SlackBot:\n\n starterbot_id = None\n RTM_READ_DELAY = 0.5\n\n def __init__(self, oauthtoken):\n self.token = oauthtoken\n self.slack_rtm = slack.RTMClient(token=self.token)\n self.slack_cli = slack.WebClient(token=self.token)\n self.setup_bot_events()\n\n def post_message(self, message, channel):\n return self.slack_cli.chat_postMessage(\n channel=channel,\n text=message\n )\n\n def get_user_info(self, user): # Probably pointless having this func\n return self.slack_cli.users_info(\n user=user\n )['user']\n\n commands = {}\n\n def register_command(self, keyword, callback):\n if keyword in self.commands:\n print(f\"WARNING: Chat expression {keyword} already registered to callback {commands[keyword].__name__}. 
Re-registering to {callback.__name__}\")\n\n self.commands[keyword] = callback\n print(f\"Command {keyword} registered to callback {callback.__name__}\")\n\n def command(self, keyword, **kwargs):\n def decorator(callback):\n self.register_command(keyword, callback)\n return callback\n return decorator\n\n def require_admin(self, func):\n def wrap(message):\n if not self.get_user_info(message['user'])['is_admin']:\n return f\"<@{message['user']}> Sorry, only workspace administrators can use this command..\"\n else:\n return func(message)\n return wrap\n \n def command_args(self, parser):\n \"\"\"\n Function decorator to more easily parse arguments.\n \n parser: Some argument parsing object (argparse.ArgumentParser)\n which has child function parse_args, returning a dict\n of named arguments.\n \"\"\"\n def makeWrapper(func):\n \n def wrapper(message):\n args = message['text'].split(\" \")[1:]\n with io.StringIO() as buf, redirect_stdout(buf), redirect_stderr(buf):\n try:\n args = vars(parser.parse_args(args))\n output = buf.getvalue()\n if len(output):\n bot.say_in_channel(f\"```{output}```\", message['channel'])\n except SystemExit:\n output = buf.getvalue()\n if len(output):\n self.post_message(f\"```{output}```\", message['channel'])\n return\n return func(message, args)\n\n return wrapper\n \n return makeWrapper \n\n def handle_message(self, **payload):\n \"\"\"\n Executes bot command if the command is known\n \"\"\"\n if 'subtype' in payload['data']: return\n\n channel = payload['data']['channel']\n user = payload['data']['user']\n text = payload['data']['text']\n \n if text[0] != '!': return # Only respond to cmds prefaced with !\n\n default_response = \"I'm not sure what you mean. Use !help for help.\"\n response = None\n command = text.split()\n command[0] = command[0].lower()[1:]\n\n if(command[0] in self.commands):\n response = self.commands[command[0]](payload['data'])\n else:\n response = '<@' + user + '> ' + default_response\n \n if response == None: return\n \n self.post_message(response, channel)\n\n def setup_bot_events(self):\n self.slack_rtm.on(event='message', callback=self.handle_message)\n\n def run(self):\n print(\"SYCS Bot Running\")\n self.slack_rtm.start()\n","repo_name":"sycs-climate/sycsbot","sub_path":"slackbot.py","file_name":"slackbot.py","file_ext":"py","file_size_in_byte":3736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5374513375","text":"#!/usr/bin/env python3\n\n'''\nFunctions for PDF related to image\n\npage here is from fitz, unlike others, which are of PyPDF2\n and fitz is contained in module `PyMuPDF`\n\ntwo libraries used: fitz and reportlab\n'''\n\nimport os\n\nfrom io import BytesIO\nimport re\n\nfrom PIL import Image\n\nimport fitz\n\nfrom reportlab.pdfgen import canvas\nfrom reportlab.lib.utils import ImageReader\n\nfrom .funcs_path import ext_elements_by_range, list_files_by_range_fmt\nfrom .funcs_page import get_pagesize_by_name\n\n# convert page to PIL Image\ndef page_to_image(page, write_to_file=None, **kwargs):\n '''\n convert page to PIL image or write to a file\n\n Parameters:\n write_to_file: None or string\n if None, return a PIL image object\n otherwise, specify a file name to write image to\n '''\n pix=page_to_pixmap(page, **kwargs)\n\n png=pix.getPNGData()\n\n if write_to_file:\n with open(write_to_file, 'wb') as f:\n f.write(png)\n return\n \n return Image.open(BytesIO(png))\n\ndef page_to_pixmap(page, zoomxy=2, alpha=False):\n '''\n convert page to pixel map\n\n zoom 
coefficient\n image size by default: 792X612, dpi=96\n zoom_x=1.33333333 # (1.33333333-->1056x816), 1056/792=1.333333\n zoom_y=1.33333333\n '''\n zoom_x=zoom_y=zoomxy\n mat=fitz.Matrix(zoom_x, zoom_y)\n\n return page.getPixmap(matrix=mat, alpha=alpha)\n\n# fitz page\ndef yield_fitz_pages_from_pdf(pdfname, page_range=None):\n '''\n return page number and page\n\n page_range must be given with `one_started` and `keep_end`\n\n yield page id, page\n where id starts from 1\n '''\n pdf=fitz.open(pdfname)\n\n pages=range(len(pdf))\n\n if page_range is not None:\n pages=ext_elements_by_range(pages, page_range, keep_end=True, one_started=True)\n\n for p in pages:\n yield p+1, pdf[p]\n\n# write functions\ndef write_page_to_file(page, fname):\n page_to_image(page, write_to_file=fname)\n\ndef write_pdf_to_dir_image(pdfname, dir_image='pages',\n fname_format='page-%i.png', page_range=None):\n '''\n extract pages in a PDF to a directory\n '''\n if not os.path.exists(dir_image):\n os.mkdir(dir_image)\n\n # format for image file name\n f=lambda p, fmt=fname_format: fmt % p\n\n for pageid, page in yield_fitz_pages_from_pdf(pdfname, page_range=page_range):\n fname=os.path.join(dir_image, f(pageid))\n print('write to %s' % fname)\n\n write_page_to_file(page, fname)\n\n# create pdf from images or other pdf\ndef mkpdf_from_images(pdf_out, images, pagesize='a4', pagescale=None, **kwargs):\n '''\n make pdf from pages\n\n optional keyword arguments:\n page_range\n fname_format\n '''\n pagesize=get_pagesize_by_name(pagesize, pagescale)\n\n c=canvas.Canvas(pdf_out, pagesize=pagesize)\n\n for i, p in enumerate(yield_images(images, **kwargs)):\n if type(p) is str:\n print('add page %i: %s' % (i+1, p))\n else:\n print('add page', i+1)\n\n add_image_page(c, p)\n\n c.showPage()\n\n c.save()\n\n## auxilliary functions for canvas drawing\ndef add_image_page(c, img):\n '''\n add image to a pdf canvas\n\n Parameters:\n c: `canvas.Canvas`\n img: str or PIL Image\n '''\n if type(img) is str:\n img=Image.open(img)\n\n pagesize=get_pagesize_of_canvas(c)\n\n rect=page_draw_region(pagesize, img.size)\n c.drawImage(ImageReader(img), *rect)\n\ndef get_pagesize_of_canvas(c):\n '''\n pagesize of a canvas\n\n more setup of canvas could refer to `c.__dict__`\n '''\n return c._pagesize\n\ndef page_draw_region(pagesize, imgsize):\n '''\n determine draw region for an image in the page\n\n return (left, bottom, right, top)\n '''\n w0, h0=pagesize\n w1, h1=imgsize\n\n # scale image to page\n scale=min(w0/w1, h0/h1)\n w1*=scale\n h1*=scale\n\n # determine region\n dw, dh=w0-w1, h0-h1\n\n left=dw/2\n right=left+w1\n\n bottom=dh/2\n top=bottom+h1\n\n return left, bottom, right, top\n\n## yield images from image name list, image directory or PDF file\ndef yield_images(images, **kwargs):\n '''\n yield images from list, directory or PDF\n '''\n if type(images) is str:\n if os.path.isdir(images):\n func=yield_images_from_dir\n else:\n func=yield_images_from_pdf\n else:\n func=yield_images_from_list\n\n for p in func(images, **kwargs):\n yield p\n\ndef yield_images_from_list(images, page_range=None):\n '''\n yield PIL image from list\n\n page_range must be given with one_started=False and keep_end=False\n '''\n if page_range is not None:\n images=ext_elements_by_range(images, ele_range=page_range, keep_end=False, one_started=False)\n \n for img in images:\n yield img\n\ndef yield_images_from_pdf(pdfname, **kwargs):\n '''\n yield PIL image from PDF file\n '''\n for _, page in yield_fitz_pages_from_pdf(pdfname, **kwargs):\n yield 
page_to_image(page)\n\ndef yield_images_from_dir(dir_images, **kwargs):\n for fname in list_files_by_range_fmt(dir_images, **kwargs):\n yield fname\n\n# image operations\n## crop image\ndef crop_image(img, left=0, right=1, lower=0, upper=1):\n '''\n `left, upper, width, height` are given as a ratio to the image size\n generally ranging from 0 to 1\n '''\n w, h=img.size\n\n left=w*left\n upper=h*upper\n\n right=w*right\n lower=h*lower\n\n return img.crop(box=(left, lower, right, upper))\n\ndef split_images_horizontal(dir_images, page_range=None, dir_out=None,\n prefix_fmt='page-%i', prefix_out_fmt='crop-%i', fig_suffix='.png',\n sep=0.5, ncrop_starts=1):\n '''\n split each page in a directory `dir_images` within in range `page_range`\n into 2 parts in horizontal direction\n of which fraction is given by `sep`\n that means two parts are (0, sep) and (sep 1)\n '''\n if dir_out is None:\n dir_out=dir_images\n\n if not os.path.exists(dir_out):\n os.mkdir(dir_out)\n\n fnames=list_files_by_range_fmt(dir_images, page_range=page_range,\n fname_format=(prefix_fmt+fig_suffix))\n\n ncrop=ncrop_starts\n for fname in fnames:\n print('split %s ==> crop %i, %i' % (fname, ncrop, ncrop+1))\n img=Image.open(fname)\n\n outfname=os.path.join(dir_out, (prefix_out_fmt % ncrop)+fig_suffix)\n crop=crop_image(img, right=sep)\n crop.save(outfname)\n ncrop+=1\n\n outfname=os.path.join(dir_out, (prefix_out_fmt % ncrop)+fig_suffix)\n crop=crop_image(img, left=sep)\n crop.save(outfname)\n ncrop+=1\n","repo_name":"hujh08/EbooksEdit","sub_path":"funcs_image.py","file_name":"funcs_image.py","file_ext":"py","file_size_in_byte":6788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24272077944","text":"## filter out v1 data from masks\nimport os, json, time, pickle\nimport nibabel as nib\nimport numpy as np\nfrom sklearn import linear_model\nimport itertools as it\n\n# initialize parameters\n### work_dir = '/Users/chloe/Documents/'\n### all_subjects = ['sub-02', 'sub-04']\nwork_dir = '/mindhive/saxelab3/anzellotti/forrest/derivatives/fmriprep/'\nmask_dir = '/mindhive/saxelab3/anzellotti/forrest/rV1_mask_funcSize_bin.nii.gz'\nall_subjects = ['sub-01', 'sub-02', 'sub-03', 'sub-04', 'sub-05', 'sub-09', 'sub-10', 'sub-14', 'sub-15', 'sub-16', 'sub-17', 'sub-18', 'sub-19', 'sub-20']\nmask = 'rV1'\ntotal_run = 8\n\n# cd to work directory\nos.chdir(work_dir)\nmask_data = nib.load(mask_dir).get_data()\n\n# preprocess data: filter movie data with rois for all subjects\nfor s in range(0, len(all_subjects)):\n\t# data initialize\n\tsub = all_subjects[s]\n\tsub_dir = work_dir + sub + '_complete/'\n\tsub_data_dir = sub_dir + 'ses-movie/func/'\n\tpre_out_dir = sub_dir + sub + '_pre/'\n\t# make output dir if not exist\n\tif not os.path.exists(pre_out_dir):\n\t\tos.makedirs(pre_out_dir)\n\n\t# iterate through all runs of this subject and filter by roi masks\n\tfor run in range(1, total_run + 1):\n\t\trun_dir = sub_data_dir + sub + '_ses-movie_task-movie_run-' + str(run) + '_bold_space-MNI152NLin2009cAsym_preproc.nii.gz'\n\t\trun_data = nib.load(run_dir).get_data()\n\t\troi_out_dir = pre_out_dir + sub + '_' + mask + '_run_' + str(run) + '.npy'\n\t\troi_data = np.zeros((run_data.shape[3], int(np.sum(mask_data))))\n\n\t\t# iterate through all voxels to find roi voxels\n\t\troi_index = 0\n\t\tfor t in range(0, run_data.shape[3]):\n\t\t\tfor x in range(0, run_data.shape[0]):\n\t\t\t\tfor y in range(0, run_data.shape[1]):\n\t\t\t\t\tfor z in range(0, run_data.shape[2]):\n\t\t\t\t\t\tif 
mask_data[x, y, z] == 1:\n\t\t\t\t\t\t\troi_data[t, int(roi_index)] = run_data[x, y, z, t]\n\t\t\t\t\t\t\troi_index += 1\n\t\t\troi_index = 0 # to next row, reset column indices\n\n\t\t# save roi data\n\t\tnp.save(roi_out_dir, roi_data)","repo_name":"yl3506/iMVPD_dev","sub_path":"data/data_filter_v1.py","file_name":"data_filter_v1.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"36292835376","text":"def filter_prime(x):\n    if x < 2:\n        return False\n    for i in range(2, x):\n        if (x % i) == 0:\n            return False\n    return True\n\nx = int(input())\nthislist = []\nfor i in range(x):\n    y = int(input())\n    thislist.append(y)\n    \nprime_numbers = list(filter(lambda x: filter_prime(x), thislist))\n\nprint(prime_numbers)\n","repo_name":"alikhanmurat/PP2","sub_path":"Lab3/Classes-Objects/ex6.py","file_name":"ex6.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1881429859","text":"import numpy as N\nfrom pubsub import pub\nfrom wxgraph.gui_mode import GUIModeBase, GraphCursors\nfrom wxgraph.events import *\nfrom wxgraph import DrawObjectSquarePoint\nimport wxgraph.util_bbox as bbox\nfrom .shape_state_node import StateNodeShape, FinalStateNodeShape, InitStateNodeShape, StateChartNode\nfrom .shape_note_node import NoteNodeShape\nfrom .shape_transition import TransitionWireShape, StateChartTransition\nfrom .define_gui import *\nfrom application.define import *\n\n\n# some mix-ins for use with the other modes:\nclass GUIModeZoomWithMouseWheelMixin:\n    def on_wheel(self, event: wx.MouseEvent):\n        _scale = self.canvas.scale\n        _scale += event.GetWheelRotation() / (event.GetWheelDelta() * 10)\n        _pos = event.GetPosition()\n        if event.ControlDown():\n            if _scale < self.canvas.minScale: _scale = self.canvas.minScale\n            if _scale > self.canvas.maxScale: _scale = self.canvas.maxScale\n            self.canvas.zoom(_scale / self.canvas.scale, _pos, center_coords=\"Pixel\", keep_point_in_place=True)\n        super(GUIModeZoomWithMouseWheelMixin, self).on_wheel(event)\n\n\nclass GUIModePlace(GUIModeBase):\n    def __init__(self, shape_type=None, canvas=None):\n        GUIModeBase.__init__(self, canvas)\n        self._shapeType = shape_type\n        self._isShapeTypeReady = False\n\n    def _add_item(self, item):\n        _canvas_parent = self.canvas.GetParent()\n        if item is None:\n            return\n        _canvas_parent.add_item(item)\n\n    def set_shape_type(self, shape_type):\n        self._shapeType = shape_type\n\n    def on_left_down(self, event):\n        self._isShapeTypeReady = self._shapeType is not None\n        super(GUIModePlace, self).on_left_down(event)\n\n    def on_left_up(self, event: wx.MouseEvent):\n        _pos = event.GetPosition()\n        _world_pos = wx.RealPoint(self.canvas.pixel_to_world(_pos))\n        if self._isShapeTypeReady:\n            _item = None\n            if self._shapeType == EnumCanvasToolbarMode.STATE:\n                _item = StateNodeShape(_world_pos, 'Untitled State')\n            elif self._shapeType == EnumCanvasToolbarMode.INIT_STATE:\n                _item = InitStateNodeShape(_world_pos)\n            elif self._shapeType == EnumCanvasToolbarMode.SUB_STATE:\n                _item = None\n            elif self._shapeType == EnumCanvasToolbarMode.FINAL_STATE:\n                _item = FinalStateNodeShape(_world_pos)\n            elif self._shapeType == EnumCanvasToolbarMode.NOTE:\n                _str = \"This is a note component.\\nyou could add more text\\n...\"\n                _item = NoteNodeShape(_world_pos, _str)\n            if _item is not None:\n                self._add_item(_item)\n        self._isShapeTypeReady = False\n        super(GUIModePlace, 
self).on_left_up(event)\n\n\nclass GUIModeConnection(GUIModeBase):\n def __init__(self, canvas=None):\n GUIModeBase.__init__(self, canvas)\n self.srcNode = None\n self.dstNode = None\n self.srcPort = None\n self.dstPort = None\n self.wire = None\n\n def on_left_down(self, event):\n _pos = event.GetPosition()\n _world_pos = wx.RealPoint(self.canvas.pixel_to_world(_pos))\n _hit_object = self.canvas.objectUnderMouse\n _canvas_parent = self.canvas.GetParent()\n if _canvas_parent is not None:\n if _hit_object is not None and isinstance(_hit_object, StateChartNode):\n if _hit_object.get_connection_style() != EnumShapeConnectionStyle.NONE:\n self.srcNode = _hit_object\n _closest_pt, _uv = _hit_object.get_connection_point_at(_world_pos)\n _wire = TransitionWireShape(_closest_pt, _closest_pt)\n _wire.srcNode = _hit_object\n self.wire = self.canvas.add_object(_wire)\n self.wire.set_connection_invalid_style()\n self.canvas.draw()\n super(GUIModeConnection, self).on_left_down(event)\n\n def on_left_up(self, event):\n _pos = event.GetPosition()\n _world_pos = wx.RealPoint(self.canvas.pixel_to_world(_pos))\n _hit_object = self.canvas.objectUnderMouse\n _canvas_parent = self.canvas.GetParent()\n if _canvas_parent is not None:\n if _hit_object is not None and self.srcNode is not None and isinstance(_hit_object, StateChartNode):\n if _hit_object.get_connection_style()!=EnumShapeConnectionStyle.NONE:\n self.dstNode = _hit_object\n _closest_pt, _uv = _hit_object.get_connection_point_at(_world_pos)\n _canvas_parent.add_connection_pair(self.srcNode, self.dstNode, self.wire.srcPt, _closest_pt)\n if self.wire is not None:\n self.canvas.remove_object(self.wire)\n self.srcNode = None\n self.dstNode = None\n self.wire = None\n self.canvas.draw()\n super(GUIModeConnection, self).on_left_up(event)\n\n def on_motion(self, event):\n _pos = event.GetPosition()\n _world_pos = self.canvas.pixel_to_world(_pos)\n _hit_object = self.canvas.objectUnderMouse\n if self.wire is not None:\n if _hit_object is not None:\n if _hit_object.get_connection_style() != EnumShapeConnectionStyle.NONE:\n self.wire.set_connection_valid_style()\n self.wire.dstNode = _hit_object\n else:\n self.wire.set_connection_invalid_style()\n if self.srcNode is not None:\n self.wire.set_dst_point(N.array(_world_pos, N.float))\n self.canvas.draw(True)\n\n super(GUIModeConnection, self).on_motion(event)\n\n\nclass GUIModeMouse(GUIModeZoomWithMouseWheelMixin, GUIModeBase):\n \"\"\"\n\n Mouse mode checks for a hit test, and if nothing is hit,\n raises a FloatCanvas mouse event for each event.\n\n \"\"\"\n\n # todo: multi item select and move\n # todo: multi item select with ctrl_key and rubberband\n def __init__(self, canvas=None):\n GUIModeBase.__init__(self, canvas)\n self.selectedNodeItems = list()\n self.selectedWireItem = None\n self.orgPos = None\n self.curPos = None\n # variable for wire and rewire\n self.useMouseRewire = False\n self.wireOrgSrcNode = None\n self.wireOrgDstNode = None\n # variable for pan\n self.panCursor = self.cursors.handCursor\n self.panGrabCursor = self.cursors.grabHandCursor\n self.panStartMove = None\n self.panMidMove = None\n self.panPrevMoveXY = None\n self.panEndMove = None\n self.panMoveTimer = wx.PyTimer(self.on_pan_move_timer)\n self.panRedrawDelayMs = 10\n # variable for rubber bandbox\n self.rbDrawRect = False\n self.rbStartPos = None\n self.rbRect = None\n # highlight items\n # bind the event\n pub.subscribe(self.on_canvas_mode_changed, EnumAppSignals.sigV2VCanvasToolbarModeChanged.value)\n\n def _update_position(self, 
pos):\n self.orgPos = self.curPos\n self.curPos = pos\n\n def _get_position_diff(self, reverse=False):\n if self.orgPos is None or self.curPos is None:\n return 0, 0\n if reverse:\n return self.orgPos - self.curPos\n return self.curPos - self.orgPos\n\n def _on_select_node_item(self, item):\n\n _ctrl_key_state = wx.GetKeyState(wx.WXK_CONTROL)\n # if item in self.selectedItems and item.isSelected:\n item.set_selected(not item.isSelected)\n if not item.isSelected and item in self.selectedNodeItems:\n self.selectedNodeItems.remove(item)\n elif item.isSelected and item not in self.selectedNodeItems:\n self.selectedNodeItems.append(item)\n if not _ctrl_key_state and item.isSelected:\n self._reset_selected_node_items_style(item)\n\n def _reset_selected_node_items_style(self, exclusive=None):\n for x in self.selectedNodeItems:\n if exclusive is not None:\n if x is exclusive:\n continue\n x.set_selected(False)\n\n def _on_rewire(self, item, pos_diff, object_at, current_pos):\n _len_ctrl_pt = item.get_control_points_length()\n if item.currentSelectedCtrlPtIdx == 0:\n _conn_pt = None\n if item.srcNode is not None:\n _conn_pt, _uv = item.srcNode.get_connection_point_at(current_pos)\n item.set_connection_valid_style()\n else:\n item.set_connection_invalid_style()\n if _conn_pt is None:\n # check if is connection broken\n item.srcNode = None\n if object_at is not None:\n if object_at is not item.srcNode and isinstance(object_at, StateChartNode):\n item.srcNode = object_at\n item.set_src_point(current_pos)\n else:\n # still connectable\n item.set_src_point(_conn_pt)\n elif item.currentSelectedCtrlPtIdx == _len_ctrl_pt - 1:\n _conn_pt = None\n if item.dstNode is not None:\n _conn_pt, _uv = item.dstNode.get_connection_point_at(current_pos)\n else:\n item.set_connection_invalid_style()\n if _conn_pt is None:\n # check if is connection broken\n item.dstNode = None\n if object_at is not None:\n if object_at is not item.dstNode and isinstance(object_at, StateChartNode):\n item.dstNode = object_at\n item.set_dst_point(current_pos)\n else:\n # still connectable\n item.set_dst_point(_conn_pt)\n item.move_control_point(pos_diff)\n\n def _on_draw_rubber_band(self, event):\n _x, _y = self.rbStartPos\n _corner_x, _corner_y = event.GetPosition()\n _w, _h = (_corner_x - _x, _corner_y - _y)\n if abs(_w) > 5 and abs(_h) > 5:\n # draw the RB box\n _dc = wx.ClientDC(self.canvas)\n _dc.SetPen(wx.Pen('#0ab', 1, wx.SHORT_DASH))\n _dc.SetBrush(wx.TRANSPARENT_BRUSH)\n _dc.SetLogicalFunction(wx.XOR)\n if self.rbRect:\n _dc.DrawRectangle(*self.rbRect)\n self.rbRect = ((_x, _y), (_w, _h))\n _dc.DrawRectangle(*self.rbRect)\n # self.canvas.raise_graph_event(event, EVT_FC_MOTION)\n\n def on_canvas_mode_changed(self, mode):\n self._reset_selected_node_items_style()\n\n def on_pan_move_timer(self):\n self.canvas.draw()\n\n # Handlers\n def on_left_down(self, event):\n _pos = event.GetPosition()\n _world_pos = self.canvas.pixel_to_world(_pos)\n _hit_object = self.canvas.objectUnderMouse\n _ctrl_key_state = wx.GetKeyState(wx.WXK_CONTROL)\n if _hit_object is not None:\n if isinstance(_hit_object, TransitionWireShape):\n self.selectedWireItem = _hit_object\n self._reset_selected_node_items_style()\n self.selectedNodeItems.clear()\n _hit_object.guess_control_point(_world_pos)\n _len = _hit_object.get_control_points_length()\n self.wireOrgSrcNode = _hit_object.srcNode\n self.wireOrgDstNode = _hit_object.dstNode\n if _hit_object.currentSelectedCtrlPtIdx != -1:\n self.useMouseRewire = True\n if _hit_object.currentSelectedCtrlPtIdx == 0 or 
_hit_object.currentSelectedCtrlPtIdx == _len - 1:\n _hit_object.save_hit()\n _hit_object.unbind_all()\n self.orgPos = _world_pos\n self.curPos = _world_pos\n elif isinstance(_hit_object, StateChartNode):\n if _ctrl_key_state or not self.selectedNodeItems:\n self._on_select_node_item(_hit_object)\n elif len(self.selectedNodeItems) == 1:\n if self.selectedNodeItems[0] is not _hit_object:\n self._reset_selected_node_items_style()\n self.selectedNodeItems.clear()\n self._on_select_node_item(_hit_object)\n self.orgPos = _world_pos\n self.curPos = _world_pos\n else:\n self.rbStartPos = _pos\n self.rbDrawRect = True\n self.blockGraphEvent = True\n self._reset_selected_node_items_style()\n self.selectedNodeItems.clear()\n super(GUIModeMouse, self).on_left_down(event)\n\n def on_left_up(self, event):\n _hit_object = self.canvas.objectUnderMouse\n _canvas_parent = self.canvas.GetParent()\n _ctrl_key_state = wx.GetKeyState(wx.WXK_CONTROL)\n if _ctrl_key_state:\n pass\n else:\n if self.selectedWireItem is not None:\n self.useMouseRewire = False\n if self.selectedWireItem.srcNode is None or self.selectedWireItem.dstNode is None:\n if _canvas_parent:\n _canvas_parent.remove_connection_pair(self.selectedWireItem, self.wireOrgSrcNode,\n self.wireOrgDstNode)\n else:\n if _canvas_parent:\n _canvas_parent.update_connection_pair(self.selectedWireItem, self.wireOrgSrcNode,\n self.wireOrgDstNode)\n self.selectedWireItem.restore_hit()\n self.selectedWireItem.currentSelectedCtrlPtIdx = -1\n if self.selectedNodeItems:\n for item in self.selectedNodeItems:\n pass\n if _hit_object is None:\n self.selectedNodeItems.clear()\n self.curPos = None\n self.orgPos = None\n self.selectedWireItem = None\n self.wireOrgSrcNode = None\n self.wireOrgDstNode = None\n if self.rbDrawRect:\n self.rbDrawRect = False\n self.blockGraphEvent = False\n self.rbRect = None\n if self.rbRect:\n _world_rect = (self.canvas.pixel_to_world(self.rbRect[0]),\n self.canvas.scale_pixel_to_world(self.rbRect[1]))\n # wx.CallAfter(self.CallBack, _world_rect)\n print('rb_band is already', _world_rect)\n self.canvas.draw(True)\n super(GUIModeMouse, self).on_left_up(event)\n\n def on_middle_down(self, event):\n self.canvas.SetCursor(self.panGrabCursor)\n self.panStartMove = N.array(event.GetPosition())\n self.panMidMove = self.panStartMove\n self.panPrevMoveXY = (0, 0)\n super(GUIModeMouse, self).on_middle_down(event)\n\n def on_middle_up(self, event):\n self.canvas.SetCursor(self.cursor)\n if self.panStartMove is not None:\n self.panEndMove = N.array(event.GetPosition())\n _diff_move = self.panMidMove - self.panEndMove\n self.canvas.move_image(_diff_move, 'Pixel', redraw=True)\n super(GUIModeMouse, self).on_middle_up(event)\n\n def on_motion(self, event: wx.MouseEvent):\n _pos = event.GetPosition()\n _world_pos = wx.RealPoint(self.canvas.pixel_to_world(_pos))\n _hit_object = self.canvas.objectUnderMouse\n if event.Dragging():\n if event.LeftIsDown():\n self._update_position(_world_pos)\n _pos_diff = self._get_position_diff()\n if self.selectedNodeItems:\n for item in self.selectedNodeItems:\n item.move(_pos_diff)\n for in_wire in item.inWires:\n in_wire.set_dst_point(in_wire.dstPt + _pos_diff)\n for out_wire in item.outWires:\n out_wire.set_src_point(out_wire.srcPt + _pos_diff)\n if self.selectedWireItem:\n _object_at = self.canvas.object_at(_world_pos)\n self._on_rewire(self.selectedWireItem, _pos_diff, _object_at, _world_pos)\n if self.rbDrawRect:\n self._on_draw_rubber_band(event)\n elif event.MiddleIsDown() and self.panStartMove is not None:\n 
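# Pan with the middle button: move the image without an immediate redraw,\n                # then let a short one-shot timer trigger the single expensive redraw.\n                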
self.panEndMove = N.array(_pos)\n _diff_move = self.panMidMove - self.panEndMove\n # reset the canvas without re-drawing\n self.canvas.move_image(_diff_move, 'Pixel', redraw=False)\n self.panMidMove = self.panEndMove\n self.panMoveTimer.Start(self.panRedrawDelayMs, oneShot=True)\n self.canvas.draw(True)\n super(GUIModeMouse, self).on_motion(event)\n\n def on_key_down(self, event: wx.KeyEvent):\n _k_code = event.GetKeyCode()\n if _k_code == wx.WXK_DELETE or _k_code == wx.WXK_NUMPAD_DELETE:\n if self.selectedNodeItems:\n _lst = [x for x in self.selectedNodeItems]\n pub.sendMessage(EnumAppSignals.sigV2VGuiModeDelItemRequested.value, items=_lst)\n\n def update_screen(self):\n # The screen has been re-drawn, so StartMove needs to be reset.\n self.panStartMove = self.panMidMove\n","repo_name":"yiyunzhi/pxcmbt","sub_path":"gui/gui_mode.py","file_name":"gui_mode.py","file_ext":"py","file_size_in_byte":17289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19971378574","text":"__author__ = 'jer'\nimport random\n\ncount = 0\n\ndef swap(a,i,j):\n a[i],a[j] = a[j],a[i]\n\n\ndef qsort(a,low=0,high=-1):\n global count\n if high == -1:\n high = len(a) -1\n if low < high:\n swap(a,low, random.randint(low,high))\n m = low\n for j in range(low+1,high+1):\n count+=1\n\n if a[j] < a[low]:\n m += 1\n swap(a,m,j)\n\n\n swap(a,low,m)\n qsort(a,low,m-1)\n qsort(a,m+1,high)\n\n\n\na = [0]*100000\nfor i in range(100000):\n a[i] = random.randint(0,50000000)\nprint(\"a gegenereerd\")\nprint(a[0:10])\n\nqsort(a)\nprint(a[0:10])\nprint(count)\ncount = 0\n\n","repo_name":"JeremyRuizenaar/alds201617","sub_path":"opdracht_w2opdracht5.py","file_name":"opdracht_w2opdracht5.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14129368455","text":"\ndef minibatch_fn(net, loss_fn, optimizer, batch):\n \"\"\"\n Trains network for a single batch.\n\n Args:\n net (torch network)\n ANN network (nn.Module)\n\n loss_fn (torch loss)\n loss function for SGD\n\n optimizer (torch optimizer)\n optimizer for SGD\n\n batch (torch.Tensor)\n vectorized input images\n \n Returns:\n Nothing.\n \"\"\"\n\n output_train = net(batch)\n loss = loss_fn(output_train, batch)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n return(loss)\n\nn_epochs = 3\nbatch_size = 64\n\nencoding_size = 16\n\nmodel = nn.Sequential(nn.Linear(input_size, encoding_size),\n nn.PReLU(),\n nn.Linear(encoding_size, input_size),\n nn.Sigmoid())\nwith plt.xkcd():\n runSGD(model, input_train, input_test, criterion='mse',\n n_epochs=n_epochs, batch_size=batch_size, minibatch_fn=minibatch_fn)","repo_name":"ddinesan/Neuroscience","sub_path":"tutorials/W3D5_DeepLearning2/solutions/W3D5_Tutorial1_Solution_c7cecb55.py","file_name":"W3D5_Tutorial1_Solution_c7cecb55.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2337984890","text":"# month = input(\"введите номер месяца: \")\n# if month.isdigit() and 1 <= int(month) <= 12:\n# month = int(month)\n# if 3 <= month <= 5:\n# print(\"весна\")\n# elif 6 <= month <= 8:\n# print(\"лето\")\n# elif 9 <= month <= 11:\n# print(\"осень\")\n# else:\n# print(\"зима\")\n# else:\n# print(\"неправильно\")\n#\n#\n# h = int(input(\"часы:\"))\n# m = int(input(\"минуты:\"))\n# s = int(input(\"секунды\"))\n# if 0 <= h <= 23 and 0 <= m <= 59 and 0 <= s <= 59:\n# 
print(\"формат правильный\")\n# print(f\"{h}:{m}:{s}\")\n# else:\n# print(\"ошибка\")\n# if h > 23 or h < 0:\n# print(\"часы в формате 0-23\")\n# if m > 59 or m < 0:\n# print(\"минуты в формате 0-59\")\n# if s > 59 or s < 0:\n# print(\"секунды в формате 0-59\")\nq1 = input(\"Какого цвета трава?\\n\"\n \"а)пон б)мандарин в) геншен г) цвет шрека\\n\")\nscore = 0\nif q1 == \"г\":\n score = score + 10\n print(\"yes\")\nelse:\n print(\"no\")\n print(score)\n","repo_name":"Komarovdb/python","sub_path":"lesson_6/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13212035417","text":"def pretraitement (n):\n tab=[0,0,0,0]\n i=0\n while i<=3:\n tab[i]=n//10**(3-i)\n n=n%10**(3-i)\n i=i+1\n return(tab)\n \n\ndef to_lettre (integer,a,b,c):\n result=''\n #if int(integer)!=integer:\n # break()\n \n if 5<=integer<=8:\n result=result+b\n integer=integer-5\n while 1<=integer<=3:\n result=result+a\n integer=integer-1\n if integer==4:\n result=result+a+b\n if integer==9:\n result=result+a+c\n return(result)\n \ndef to_roman(n):\n result=''\n tab=pretraitement(n)\n i=0\n while i<=3:\n if i==0:\n a='M'\n b=''\n c=''\n \n if i==1:\n a='C'\n b='D'\n c='M'\n if i==2:\n a='X'\n b='L'\n c='C'\n if i==3:\n a='I'\n b='V'\n c='X'\n \n integer=tab[i]\n result=result+to_lettre(integer,a,b,c)\n i+=1\n return(result)\n \n \n ","repo_name":"charransol/in104","sub_path":"roman1.py","file_name":"roman1.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4089777355","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import api, fields, models, _\nfrom odoo.exceptions import UserError\n\n\nclass AccountJournal(models.Model):\n _inherit = \"account.journal\"\n\n @api.model\n def name_search(self, name='', args=None, operator='ilike', limit=100):\n args = args or []\n domain = []\n # if name:\n if self._context.get('izi_vm_journal', False):\n izi_vm_journal = self._context.get('izi_vm_journal')\n domain = [('id', 'in', [int(x) for x in izi_vm_journal.split(',')])]\n journals = self.search(domain + args, limit=limit)\n return journals.name_get()\n","repo_name":"butagreeza/korea_spa","sub_path":"addons_custom/izi_virtual_money/models/account_journal.py","file_name":"account_journal.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4421228392","text":"from collections import Counter\nimport numpy as np\nfrom sklearn.svm import LinearSVC\nfrom sklearn.metrics import precision_score, recall_score\n\nfrom tx.features import SetOfWords, FeatureVector, nvl\n\ndef find_features(wordsets, y, max_words=150):\n total_word_counts = Counter()\n tag_word_counts = Counter()\n for i, wc in enumerate(wordsets):\n total_word_counts.update(wc)\n if y[i]:\n tag_word_counts.update(wc)\n\n zscores = dict()\n n = sum(y)\n for w in tag_word_counts:\n p = float(total_word_counts[w])/len(y)\n zscores[w] = abs((tag_word_counts[w] - n*p)/np.sqrt(n*p*(1-p)))\n word_ranking = sorted([(-v, k) for k, v in zscores.iteritems()])\n features = [w for v, w in word_ranking if v < -6]\n if len(features) > max_words:\n features = features[0:max_words]\n features.sort()\n return features\n\n\nclass TaggingModel(object):\n '''\n An algorithm for tagging texts.\n Texts should be provided as a dictionary (word -> count).\n '''\n def 
__init__(self, tag_name, features):\n self.tag_name = tag_name\n self.fe = FeatureVector(features)\n self.clf = LinearSVC()\n \n def fit(self, X, y):\n '''X must be a matrix (i.e. with features already extracted)'''\n self.clf.fit(X, y)\n \n def predict_worddict(self, wd):\n '''Returns tag_name or None for a given worddict'''\n X = self.fe(wd)\n lbl = self.clf.predict(X)\n return self.tag_name if lbl else None\n\n\ndef train_tag_model(posts, tag_name):\n '''Helper function: Trains a TaggingModel and returns (model, accuracy, precision, recall)'''\n # Create X and y for the SVM:\n fe = lambda p: Counter(SET_OF_WORDS(p.all_text))\n X_wd = map(fe, posts)\n y = [tag_name in nvl(p.tags) for p in posts]\n \n # Find the informative words in X_wd\n features = find_features(X_wd, y)\n if len(features) == 0:\n return (None, 0, 0, 0)\n \n # Finally, make a matrix and train the model\n fv = FeatureVector(features)\n X = map(fv, X_wd)\n tm = TaggingModel(tag_name, features)\n tm.fit(X, y)\n y_hat = tm.clf.predict(X)\n return tm, tm.clf.score(X, y), precision_score(y, y_hat), recall_score(y, y_hat)\n ","repo_name":"konstantint/texata-finals-2014","sub_path":"tx/tagging.py","file_name":"tagging.py","file_ext":"py","file_size_in_byte":2225,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"15039024234","text":"# *-* coding: utf-8 *-*\nfrom flask import Blueprint, jsonify\nfrom psutil import Process, get_pid_list\nfrom ..auths import *\n\nprocess = Blueprint('process', __name__, \n template_folder='')\n\n@process.route('/process', methods=['GET'])\n@auth.login_required\ndef processs():\n lprocess = get_pid_list()\n lAllprocess=[]\n dprocess={}\n for pid in lprocess:\n pName=Process(pid).name()\n pMemoryPercent=Process(pid).memory_percent()\n pMemoryInfo=Process(pid).memory_info()\n pCpuAfinity=Process(pid).cpu_affinity()\n pGids=Process(pid).gids()\n pUids=Process(pid).uids()\n pUsername=Process(pid).username()\n try:\n pCwd=Process(pid).cwd()\n except:\n pCwd=''\n dprocess={'pid': pid,\n 'name': pName,\n 'Memory%': pMemoryPercent,\n 'Cpu Afinity': pCpuAfinity,\n 'CWD': pCwd,\n 'Memory info': pMemoryInfo,\n 'Gids': pGids,\n 'Uids': pUids,\n 'Username': pUsername}\n lAllprocess.append(dprocess)\n return jsonify(process=lAllprocess)\n\n\n@process.route('/process/children/', methods=['GET'])\n@auth.login_required\ndef get_children(pid):\n lchildren=[]\n pid = int(pid)\n if pid > 0:\n pchild=Process(pid)\n for child in pchild.children(recursive=True):\n lchildren.append({'name': child.name(), \n 'pid': child.pid,\n 'Memory%': child.memory_percent(),\n 'Cpu Afinity': child.cpu_affinity(),\n 'Gids': child.gids(),\n 'Uids': child.uids(),\n 'Username': child.username(),\n 'Uids': child.uids()})\n return jsonify(children=lchildren)\n","repo_name":"wesleyleite/core","sub_path":"core/process/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"1316669898","text":"# here is a Python comment\r\na = 3 # integer\r\nb = 7.2 # float\r\n# primitive data types are int, float, boolean, None\r\nprint(a+b) # in Python 2 we can write print a+b\r\nprint(b/a) # also multiply *\r\nprint(b//a) \r\nprint(b%a) # modulo - is the left over\r\nprint(b**a) # raise to the power\r\n\r\n# variables do not have a fixed data type\r\na = 'hello' # string\r\nb = True # or False\r\n\r\n# collection data-types\r\nc = 'this is a string of characters' # 
strings are immutable\r\n# all indexed collections begin at zero\r\n# c[0] = 'T' # nope - str does not support assignment\r\nc = 'altered'\r\nprint(c[0]) # square brackets let us access a member of an indexed collection\r\nd = \"we can use single, double or tripple quotes\"\r\ne = '''tripple quotes let us use line breaks\r\nlike this so long lines can contain breaks'''\r\n# other collection types\r\n# lists are mutable indexed collections of any other type\r\nmy_list = [3, 2, 1] # square brackets indicte a list\r\nmy_list[1] = 'altered'\r\nmy_list.append(d)\r\nprint(my_list[1], my_list)\r\n# tuples are immutable indexed collections of any other type\r\nmy_tuple = (8, 7, 6.3, False, a) # round brackets indicate a tuple\r\nprint(my_tuple[0])\r\n# types\r\nprint(type(my_list), type(12.8), type(e), type(222), type(my_tuple[3]))\r\n# we will look at casting data types and access members of collections\r\nx = '42' # string\r\ny = 4.2 # float\r\nprint( float(x) + y ) # 46.2\r\n# we can receive input from the user\r\nq = input('Please enter a number ') # we ALWAYS receive a str from input\r\nq = int(float(q)) # take the int of the float of the str\r\nprint(q, type(q))","repo_name":"onionmccabbage/pythonIntroSept22","sub_path":"basics.py","file_name":"basics.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32118533638","text":"import numpy as np\nimport cv2\n\n\n###############################################################################################\n# Create a function for contrast adjustment (histogram stretch/shrink)\n# First parameter: Input image that will be adjustment\n# Second parameter: New value as maximum\n# Third parameter: New value as minimum\ndef contrast_adjustment(image, new_min, new_max):\n [row, column, channel] = image.shape\n\n # Find the minimum and maximum intensity values across the image.\n old_min = np.amin(image, axis=(0, 1))\n old_max = np.amax(image, axis=(0, 1))\n\n new_image = np.zeros([row, column, channel], dtype=np.uint8)\n\n for k in range(channel):\n for i in range(row):\n for j in range(column):\n new_value = ((image[i, j, k] - old_min[k]) / (old_max[k] - old_min[k])) * (new_max - new_min) + new_min\n if new_value > 255:\n new_value = 255\n if new_value < 0:\n new_value = 0\n new_image[i, j, k] = new_value\n\n return new_image\n\n\n###############################################################################################\n\nimg = cv2.imread(\"Squidward.jpeg\")\ncontrast_img = contrast_adjustment(img, 50, 200)\n\ncv2.imshow('Original Image', img)\ncv2.imshow('Adjustment Image', contrast_img)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","repo_name":"M7mdSh3banX/Image-Processing-Assignments","sub_path":"contrast-adjustment-algorithm.py","file_name":"contrast-adjustment-algorithm.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11479933586","text":"# -*- coding: utf-8 -*-\n\nfrom unittest import TestCase\nimport logging\n\nfrom invoker.utils import AliasDict\nfrom logging import StreamHandler\n\nLOGGER = logging.getLogger(__name__)\nsh = StreamHandler()\nsh.setLevel(logging.DEBUG)\nLOGGER.addHandler(sh)\nLOGGER.setLevel(logging.DEBUG)\n\n\nclass TestAliasDict(TestCase):\n\n def test_replace(self):\n\n config = AliasDict({\n 'test': {\n 'base': 'aaa/bbb',\n 'concat': \"${test.base}/ccc\"\n }\n })\n\n self.assertEqual(config.get('test.concat'), 
'aaa/bbb/ccc')\n","repo_name":"636/dev-tips","sub_path":"invoker/invoker/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"13030008587","text":"\"\"\"\nID: philoin1\nLANG: PYTHON3\nPROG: gift1\n\"\"\"\nf = open('gift1.in', 'r')\nw = open('gift1.out', 'w')\ndata = iter(f.read().split('\\n'))\nd = dict()\ntot = int(next(data))\nfor _ in range(tot):\n d[next(data)] = 0\nfor _ in range(tot):\n cur = next(data)\n [amount, num] = map(int,next(data).split())\n if num == 0:\n continue\n q, r = divmod(amount, num)\n d[cur] += r - amount\n for _ in range(num):\n d[next(data)] += q\n\nfor name in d.keys():\n w.write(\"%s %s\\n\"%(name,d[name]))","repo_name":"philoinovsky/USACO-Traning-Python3-Solution","sub_path":"chapter1/section1.2/gift1.py","file_name":"gift1.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"70368099949","text":"import numpy as np \nfrom scipy import signal as sigproc \nimport pandas as pd \nimport matplotlib.pyplot as plt \nimport seaborn as sns \n\ntest_file = 'analog_b_bpi_trial_1'\n\ndata = pd.read_csv(f'{test_file}.csv')\n\ntime = data['Time'].to_numpy()\nval = data['Values'].to_numpy()\n\nfc1 = 10 \nfc2 = 450 \nfc3 = 20 \n\n#filter parameters\nb10Hz, a10Hz = sigproc.butter(4,fc1,'high', analog=False, fs=2000) \nb450Hz, a450Hz = sigproc.butter(4,fc2,'low', analog=False, fs= 2000) \nb20Hz, a20Hz = sigproc.butter(2,fc3,'low', analog=False, fs=2000)\n\n#filter and envelope detector\nsig1 = sigproc.filtfilt(b10Hz,a10Hz,val) \nsig2 = sigproc.filtfilt(b450Hz,a450Hz, sig1) \nrs = np.abs(sig2)\n\n\n#post-processed signal\nsig_filt = sigproc.filtfilt(b20Hz,a20Hz,rs) \n\n\nwindow = 0.3 \nperiod = 1/2000 \nSD = []\n\nnsamp = int(window/period)\n\nsets = [sig_filt[n:n+nsamp] for n in range(0,len(sig_filt),nsamp)]\n\nSD = np.array([np.std(element) for element in sets])\nmean = np.array([np.mean(element) for element in sets])\n\nmin_SD_idx = np.argmin(np.min(SD))\nmean_min_idx = mean[min_SD_idx] \n\nh=3\n\nth = mean_min_idx + h*np.min(SD)\n\nonset_time = [] \noffset_time = []\n\ntest_sig = sig_filt #sigproc.savgol_filter(sig_filt,100,polyorder=7)\n\nfor idx in range (len(test_sig)): \n if (test_sig[idx] > th) and (len(onset_time) == len(offset_time)): \n onset = True \n for element in test_sig[idx:idx+25]:\n if element < th: \n onset = False \n if onset:\n onset_time.append(time[idx]) \n\n elif (test_sig[idx] < th) and (len(onset_time) > len(offset_time)): \n offset = True\n for element in test_sig[idx:idx+25]:\n if element > th: \n offset = False \n if offset:\n offset_time.append(time[idx]) \n\nepsilon = 3 #0.6 if BPI \n\nleast_onset = [] \nleast_offset = [] \n\nfor j in range (1,len(onset_time)): \n least = onset_time[j]\n if onset_time[j] - onset_time[j-1] < epsilon: \n continue \n else: \n least_onset.append(onset_time[j-1])\n\nfor j in range (1,len(offset_time)): \n least = offset_time[j]\n if offset_time[j] - offset_time[j-1] < epsilon: \n continue \n else: \n least_offset.append(offset_time[j])\n\nprint(least_onset)\nprint(least_offset)\n\nsns.set()\nsns.set_style(\"whitegrid\")\n\nfig, ax = plt.subplots(2,1)\nax[0].plot(time,val, color='y', label = 'DAC Output')\nax[0].set_title(f'Reconstructed EMG Signal, {test_file}')\nax[0].set_xlabel('Time (s)') \nax[0].set_ylabel('Voltage (V)')\nax[0].legend()\n\nax[1].plot(time,sig_filt, 
color='y', label = 'envelope')\nax[1].vlines(least_onset, ymin=0, ymax=1, color='b', label='onset')\nax[1].vlines(least_offset, ymin=0, ymax=1, color = 'r', label='offset')\nax[1].set_title(f'EMG Signal Envelope without Baseline Noise, {test_file}')\nax[1].set_xlabel('Time (s)') \nax[1].set_ylabel('Voltage (V)')\nax[1].legend()\nplt.plot()\nplt.tight_layout()\nplt.show()\n\nplt.figure('Superimposed Reconstructed Signal')\nplt.title(f'{test_file}')\nplt.plot(time, abs(sig2), color = 'c', label = 'Original Signal')\nplt.plot(time, sig_filt, color = 'y', label = 'Processed Signal') \nplt.vlines(least_onset, ymin=0, ymax=2, color='b', label='Onset Time')\nplt.vlines(least_offset, ymin=0, ymax=2, color = 'r', label='Offset Time')\nplt.xlabel('Time (s)')\nplt.ylabel('Voltage (V)')\nplt.legend(loc='upper right')\nplt.show()\n\n\n","repo_name":"emmanestallo/DataReconstruct_SigProc","sub_path":"Signal Processing/final_sigproc.py","file_name":"final_sigproc.py","file_ext":"py","file_size_in_byte":3339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"75192705706","text":"import cv2\nimport numpy as np\nfrom COMMUN import strel\nfrom COMMUN import mymorpho as mm\n\nim = cv2.imread('Assets/cat.jpg')\ncv2.imshow('Normal image', im)\n\nel = strel.build('disque', 3)\n\nimerosion = mm.myerosion(im, el)\ncv2.imshow('eroted image', imerosion)\n\nimGrad = 255-mm.mygradient(im,el)\ncv2.imshow('gradient image', imGrad)\n\nimDilate = mm.mydilatation(im, el)\ncv2.imshow('dilated image', imDilate)\ncv2.waitKey(0)\n\n","repo_name":"chekinih/Computer-vision-python","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"6739395007","text":"\nfrom Command.base import BaseCommand, CommandType\nfrom Conf.command import CMD_RES\nfrom Client.base import CLIENT_FLAG\nfrom Client.interfaces import IClient\nfrom Database.interfaces import IDatabase\n\n\nclass UnWatch(BaseCommand):\n\n need_kwargs = False\n cmd_type = CommandType.CMD_COMMON\n\n def handle(self, args, kwargs):\n UnWatch.unwatched_all_keys(self.client)\n self.client.flag &= ~CLIENT_FLAG.DIRTY_CAS\n self.client.flag &= ~CLIENT_FLAG.DIRTY_EXEC\n return CMD_RES.OK\n\n @staticmethod\n def unwatched_all_keys(client: IClient):\n transaction_manager = client.get_transaction_manager()\n for key, db in transaction_manager.get_watch_keys():\n db: IDatabase = db\n transaction_manager.remove_from_watch_key((key, db))\n\n watched_client_list = db.withdraw_watch_keys(key)\n watched_client_list.remove(client)\n\n if not watched_client_list:\n db.del_watch_key(key)\n","repo_name":"hc-tec/pydis","sub_path":"Command/commands/unwatch.py","file_name":"unwatch.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"71360289387","text":"import base64\nimport sys\nimport os\nimport string\nfrom graphviz import Digraph\nfrom local_alignment import local_alignment\nfrom global_alignment import needleman_wunsch, setValues\nfrom semiglobal_alignment import semiglobal_alignment\nfrom graph import *\n\n# Global variables\nlevel = -1\nletter = 0\nLOW = 0\nFULL = 1\ndictionary = {}\n\ndef generateGraph(imagesFolderPath, XMLFileName, pathwayCompoundsGraph):\n g = Digraph('G', format='png')\n #if(horizontalGraph):\n # g.attr(rankdir='LR')\n for k in pathwayCompoundsGraph.keys():\n g.node(k)\n for v 
in pathwayCompoundsGraph[k]:\n g.edge(k, v)\n g.render(imagesFolderPath + XMLFileName)\n\ndef addToDictionary(path):\n for node in path:\n if not node in dictionary:\n global letter, level\n if letter % 26 == 0:\n level += 1\n dictionary[node] = string.ascii_uppercase[letter % 26] + str(level)\n letter += 1\n\ndef showDictionary():\n inv_dictionary = {v: k for k, v in dictionary.items()}\n sorted_dict = sorted(inv_dictionary.items(), key = operator.itemgetter(0))\n for tuple in sorted_dict:\n print(str(tuple[0]) + \"->\" + str(tuple[1]))\n\ndef renamedPath(path):\n renamed = []\n for node in path:\n renamed.append(dictionary[node])\n return renamed\n\ndef identify_equality(graph1, graph2, detail):\n outputList = []\n equality = False\n for node in graph1.get_nodes():\n for edge in node.get_edges():\n value1 = node.get_value()\n value2 = edge.get_value()\n if value1 != '*':\n if graph2.exists_edge(str(value1), str(value2)):\n equality = True\n outputList.append(str(value1) + \" -> \" + str(value2))\n return outputList\n\ndef identify_differences(graph1, graph2, detail):\n outputList = []\n differences = False\n for node in graph1.get_nodes():\n for edge in node.get_edges():\n value1 = node.get_value()\n value2 = edge.get_value()\n if value1 != '*':\n if not graph2.exists_edge(str(value1), str(value2)):\n differences = True\n outputList.append(str(value1) + \" -> \" + str(value2))\n return outputList\n","repo_name":"anthonylle/metabolic-pathways","sub_path":"python/AuxiliaryFunctions.py","file_name":"AuxiliaryFunctions.py","file_ext":"py","file_size_in_byte":2260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29965686280","text":"import os, time, random\nfrom playsound import playsound as ps\n\nuser = os.getlogin()\nprint(user)\nexists = os.path.exists(f'/Users/{user}/Documentsrickroll.mp3')\nprint(exists)\nwhile True:\n if not exists:\n os.system(f'cp rickroll.mp3 /Users/{user}/Documents')\n time.sleep(random.randrange(1, 120))\n ps(f'/Users/{user}/Documents/rickroll.mp3')\n else:\n time.sleep(random.randrange(1, 120))\n ps(f'/Users/{user}/Documents/rickroll.mp3')","repo_name":"alessandroDeIturbe/rickrollforunix","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70561859628","text":"import os\n\n# the commission cost per trade\nCOMMISSION = 10\n\n# the default maximum dollar amount to allocate for a trade\nMAX_AMOUNT = 5000\n\n# the number of steps for scaling out of a position\nSCALE_OUT_LEVELS = 2 # sell half of quantity half way to the target price and the other half at the target price\n\n\n###################################################################\nDATA_DIR = os.path.join(os.environ['HOME'], '.pytradelib')\nZIPLINE_DIR = os.path.join(os.environ['HOME'], '.zipline')\nZIPLINE_CACHE_DIR = os.path.join(ZIPLINE_DIR, 'cache')\n\nLOG_DIR = os.path.join(DATA_DIR, 'logs')\nLOG_FILENAME = os.path.join(LOG_DIR, 'pytradelib.log')\nLOG_LEVEL = 'info' # debug, info, warning, error or critical\n\n\nif not os.path.exists(LOG_DIR):\n os.makedirs(LOG_DIR)\n\nif not os.path.exists(ZIPLINE_CACHE_DIR):\n os.makedirs(ZIPLINE_CACHE_DIR)\n","repo_name":"briancappello/PyTradeLib","sub_path":"pytradelib/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"37"} 
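# A minimal aside between records (clearly flagged as not part of any corpus
# file above or below): the pytradelib settings record that closes just above
# guards each os.makedirs() call with an os.path.exists() check. Since Python
# 3.2 the standard library expresses the same intent in one call via the
# exist_ok flag, which also sidesteps the exists-then-create race. The two
# directory names below follow that record; the rest is an illustrative sketch.
import os

DATA_DIR = os.path.join(os.environ['HOME'], '.pytradelib')
LOG_DIR = os.path.join(DATA_DIR, 'logs')

for directory in (DATA_DIR, LOG_DIR):
    # Creates intermediate directories as needed; no error if one already exists.
    os.makedirs(directory, exist_ok=True)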
+{"seq_id":"19717561003","text":"####try:\r\n#### a=10\r\n#### b=10\r\n#### print(a+b+c)\r\n####except:\r\n#### print(\"done\")\r\n##try:\r\n## a=10\r\n## b=20\r\n## print(a+b+c)\r\n## print(a/0)\r\n##except ZeroDivisionError:\r\n## print('cannot divide with zero')\r\n##except NameError:\r\n## print(\"name issue,avoid\")\r\n##finally:\r\n## print(\"done\")\r\ntry:\r\n a=20\r\n if a<15:\r\n raise ValueError(\"give more than 15\")\r\n else:\r\n print(\"payment can be done\")\r\nexcept ValueError as error:\r\n print(error)\r\n \r\n","repo_name":"Raghava369/Python","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73509810026","text":"\"\"\"\nAirflow functional test DAG. Simple list generator and add 1. \n\"\"\"\nfrom datetime import datetime, timedelta\nfrom typing import List\n\nfrom airflow import DAG\n\nfrom corrent.decorators import operation\nfrom corrent.core import inject\n\n# Injecting corrent code to Airflow operators to enable functional API\ninject()\n\ndefault_args = {\n 'owner': 'Corrent',\n 'depends_on_past': False,\n 'start_date': datetime(2019, 12, 27),\n 'retries': 1,\n 'retry_delay': timedelta(minutes=1),\n}\n\nwith DAG(\n 'corrent_test', default_args=default_args, schedule_interval=None\n) as dag:\n\n @operation\n def generate_list(length: int = 5) -> List[int]:\n return list(range(length))\n\n @operation\n def add_one_list(int_list: List[int]) -> List[int]:\n return [i + 1 for i in int_list]\n\n @operation\n def print_result(result: List[int]) -> None:\n print(result)\n\n l = generate_list(10)\n print_generated = print_result.copy('print_generated')\n print_generated(l)\n l1 = add_one_list(l)\n print_result(l1)\n\n print_result << print_generated\n","repo_name":"casassg/corrent","sub_path":"dags/corrent_test.py","file_name":"corrent_test.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"36093570937","text":"from tinydb import Query\n\nimport utilities\nfrom utilities import Constants\n\n\nasync def clear(message, *args):\n \"\"\"\n A command to clear the list of a user's favorite or followed champs from the database\n ' ~clear pro, ~clear fav\n \"\"\"\n user_id = message.author.id\n db_type = ' '.join(args).lower()\n if db_type == 'pro':\n db = Constants.DB\n success_message = f\"Successfully cleared <@{user_id}>'s list of champions followed in professional games.\"\n elif db_type == 'skin':\n db = Constants.SKIN_DB\n success_message = f\"Successfully cleared <@{user_id}>'s list of champions followed in weekly skin sales.\"\n else:\n await message.channel.send(\n f\"Use **'{utilities.Constants.COMMAND_PREFIX}clear pro'** to clear your list of champions followed in \"\n f\"professional play, or **'{utilities.Constants.COMMAND_PREFIX}clear fav'** to clear your list of champions\"\n \" followed in the weekly skin sales rotation.\")\n return\n # Clear user's list of favorites/followed\n for champ_name in Constants.CHAMP_DICT.values():\n champion = Query()\n query_results = db.get(champion['champion_name'] == champ_name)\n if query_results is not None:\n user_ids_list = query_results['user_ids']\n if user_id in user_ids_list:\n user_ids_list.remove(user_id)\n db.update({'user_ids': user_ids_list}, champion['champion_name'] == champ_name)\n await message.channel.send(success_message)\n 
return\n","repo_name":"vanechaaale/zoe","sub_path":"Commands/ClearCommand.py","file_name":"ClearCommand.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10603367569","text":"# -*- coding: utf-8 -*-\n\nfrom math import pow\nfrom math import sqrt\nfrom math import factorial\nfrom collections import namedtuple\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pylab as plt\nimport scipy.cluster.hierarchy as sch\n\nInfo = namedtuple('Info', ['order', 'mean', 'stdev', 'min', 'max'])\nData = namedtuple('Data', ['workerNum', 'order', 'efficiency'])\nBestInfo = namedtuple('BestInfo',\n ['order', 'workerNum', 'stationNum', 'cf', 'r'])\n\n\ndef ProcessRow(data):\n data = data.split(',')\n speedList = data[0].split(' ')\n for i in range(len(speedList)):\n speedList[i] = float(speedList[i].strip())\n mean = float(data[1].strip())\n stdev = float(data[2].strip())\n _min = float(data[3].strip())\n _max = float(data[4].strip())\n\n return (speedList, mean, stdev, _min, _max)\n\n\ndef CalDistance(pointA, pointB):\n distance = 0.0\n for a, b in zip(pointA, pointB):\n distance += pow(a - b, 2)\n return sqrt(distance)\n\n\ndef DisplayBestInfo(bestList):\n for best in bestList:\n print(best.order, best.workerNum, best.stationNum, best.cf, best.r)\n\n\nif __name__ == \"__main__\":\n dataList = {}\n cnt = 0\n\n for r in [0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:\n if r not in dataList:\n dataList[r] = {}\n\n for cf in [0.0, 0.1, 0.5, 0.9]:\n if cf not in dataList[r]:\n dataList[r][cf] = {}\n\n bestList = []\n\n resultPath = './ExperResult-20210120/result-r-%.1f-wc-cf-%.1f.csv' % (\n r, cf)\n with open(resultPath, 'r') as f:\n data = f.readlines()\n pos = 0\n\n for stationNum in range(3, 11, 2):\n if stationNum not in dataList[r][cf]:\n dataList[r][cf][stationNum] = {}\n\n for workerNum in range(2, stationNum):\n if r == 1.0:\n insNum = 1\n else:\n insNum = factorial(workerNum)\n best = []\n for i in range(insNum):\n (speedList, mean, stdev, _min,\n _max) = ProcessRow(data[pos + i])\n best.append(\n Info(order=speedList,\n mean=mean,\n stdev=stdev,\n min=_min,\n max=_max))\n pos += insNum\n cnt += 1\n best = sorted(best, key=lambda x: x.mean, reverse=True)\n average = sum([x.mean for x in best]) / len(best)\n\n if workerNum not in dataList[r][cf][stationNum]:\n dataList[r][cf][stationNum][workerNum] = best[0]\n else:\n print('Error %d %d %.1f %.1f' %\n (stationNum, workerNum, r, cf))\n exit(-1)\n\n for workerNum in range(2, 9):\n for r in [0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:\n orderSet = set()\n for stationNum in range(3, 11, 2):\n if workerNum >= stationNum:\n continue\n\n for cf in [0.0, 0.1, 0.5, 0.9]:\n orderSet.add(\n tuple(dataList[r][cf][stationNum][workerNum].order))\n\n variables = []\n for i in range(workerNum):\n variables.append('v%d' % i)\n\n labels = []\n for e in orderSet:\n labels.append('%s' % str(e))\n\n matrix = []\n for e in orderSet:\n matrix.append(e)\n\n if len(matrix) <= 1:\n continue\n\n _flag = False\n for m in matrix:\n if len(m) <= 1:\n _flag = True\n break\n if _flag:\n continue\n\n print(matrix)\n\n df = pd.DataFrame(matrix, columns=variables, index=labels)\n\n disMat = sch.distance.pdist(df, 'euclidean')\n disMat = sch.distance.squareform(disMat)\n Z = sch.linkage(disMat, method='average')\n\n fig = plt.gcf()\n fig.set_size_inches(30, 30 * 0.7518796992481203)\n ax = fig.add_subplot(111)\n P = sch.dendrogram(Z)\n xLabel = ax.get_xticklabels()\n _orderSet = 
list(orderSet)\n print('---------- %d ----------' % workerNum)\n newXLabel = []\n\n for x in xLabel:\n tmp = x.get_text()\n print(int(tmp), type(tmp))\n print(_orderSet[int(tmp)])\n _label = ''\n for _ in _orderSet[int(tmp)]:\n _label += '%.2f, ' % _\n newXLabel.append(_label[:-2])\n\n fontsize = 20\n plt.xticks(fontsize=fontsize)\n plt.yticks(fontsize=fontsize)\n plt.title('Clustering of %d workers, r = %.1f' %\n (workerNum, 1.0 / r),\n fontsize=fontsize)\n if workerNum >= 4:\n ax.set_xticklabels(newXLabel,\n rotation=15,\n horizontalalignment='right')\n else:\n ax.set_xticklabels(newXLabel,\n rotation=0,\n horizontalalignment='right')\n plt.savefig('./plot_dendrogram-%d-%.1f.png' % (workerNum, 1.0 / r))\n cluster = sch.fcluster(Z, 1, criterion='maxclust')\n plt.close()\n","repo_name":"JokerHB/DiscreteBucketBrigade","sub_path":"DiscreteBucketBrigade/Clustering.py","file_name":"Clustering.py","file_ext":"py","file_size_in_byte":5638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71543146669","text":"import pandas as pd\nfrom networkx.readwrite import json_graph\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\nfrom sklearn.feature_extraction.text import HashingVectorizer,CountVectorizer,TfidfVectorizer\nfrom sklearn.svm import LinearSVC\nfrom sklearn.model_selection import train_test_split, cross_val_score, ShuffleSplit, GridSearchCV\nfrom sklearn.naive_bayes import BernoulliNB, ComplementNB, MultinomialNB\nfrom sklearn.utils.multiclass import unique_labels\nfrom sklearn.utils.extmath import density\nfrom sklearn import metrics\n\nimport math\n\n## Train and test model with first approach\n\ndef train_and_test_model(algorithm=LinearSVC(),verbose=True,vect_type=\"hash\"):\n\n x_data=pd.read_json('./dataset/noduplicatedataset.json',lines=True)\n \n X_all = x_data.loc[:,'lista_asm']\n Y_all = x_data.loc[:,'semantic']\n\n #Filter General Purpose register name\n registers = [ 'rax','eax','ax',\n 'rbx','ebx','bx',\n 'rcx','ecx','cx',\n 'rdx','edx','dx',\n 'rbp','rbp','bp',\n 'rdi','edi','di',\n 'rsi','esi', 'si']\n\n if vect_type == \"hash\": \n vectorizer = HashingVectorizer(stop_words=registers)\n X_all = vectorizer.transform(X_all)\n elif vect_type == \"count\":\n vectorizer = CountVectorizer(stop_words=registers)\n X_all = vectorizer.fit_transform(X_all)\n elif vect_type == \"tfid\":\n vectorizer = TfidfVectorizer(stop_words=registers)\n X_all = vectorizer.fit_transform(X_all)\n else:\n raise RuntimeError(\"Supported Vectorizer Types : hash,count,tfid\")\n\n X_train, X_test, y_train, y_test = train_test_split(X_all, Y_all, test_size=0.3, \n random_state=42)\n clf = algorithm\n clf.fit(X_train,y_train)\n pred = clf.predict(X_test)\n score = metrics.accuracy_score(y_test, pred)\n if verbose:\n print(\"classification report:\")\n print(metrics.classification_report(y_test, pred,))\n print(\"confusion matrix:\")\n print(metrics.confusion_matrix(y_test, pred))\n return clf,score,vectorizer\n\n\n## Evaluate myster set with the trained model\n\ndef evaluate_mystery_set(clf,vect=None):\n x_data=pd.read_json('./dataset/blindtest.json',lines=True)\n \n X_test = vect.transform(x_data.loc[:,'lista_asm'])\n\n y = clf.predict(X_test)\n return y\n\n","repo_name":"IlKaiser/ML-H1","sub_path":"first_approach.py","file_name":"first_approach.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26382657329","text":"# -*- coding: utf-8 
-*-\n\"\"\"\nCreated on Fri Jan 17 11:53:52 2020\n\n@author: Doug\n\"\"\"\n\n\nimport os\n\n\ndirPath = \"C:\\\\Users\\\\Doug\\\\Repositories\\\\PracticalProgramming\\\\Session 7 - Python\\\\Instructional Material\\\\Class Examples\\\\File Access Examples\\\\\"\nthisIsAList = [\"a\",\"B\"]\n\nfor container in os.scandir(dirPath): \n \n if (os.path.isfile(container)):\n print(container,\" is a file\")\n\n elif (os.path.isdir(container)):\n print(container,\" is a folder\")\n\n \n\n \n\n# Working with files\n\ntextLines = []\n\nwith open(dirPath + \"Part Stock.csv\") as fileHandle: \n\n for fileLine in fileHandle.readlines():\n cleanedLine = fileLine.strip()\n \n textLines.append(cleanedLine)\n \n print(cleanedLine)\n \n userInput = input(\"Stop\")\n\nprint(textLines)\n \n\n\n\nfor listItem in textLines:\n print(listItem)\n \n \n \n \n ","repo_name":"dbowmans46/PracticalProgramming","sub_path":"Session 7 - Python/Instructional Material/Class Examples/File Access Examples/File Access Tutorial.py","file_name":"File Access Tutorial.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42346935834","text":"from difflib import SequenceMatcher\nfrom somsiad import Somsiad, SomsiadMixin\nfrom typing import cast\nfrom sentry_sdk import capture_exception\nimport discord\nfrom discord.ext import commands\n\nfrom core import cooldown\nfrom utilities import human_amount_of_time\n\n\nclass Spotify(commands.Cog, SomsiadMixin):\n FOOTER_TEXT = 'Spotify'\n FOOTER_ICON_URL = (\n 'https://upload.wikimedia.org/wikipedia/commons/thumb/1/19/'\n 'Spotify_logo_without_text.svg/60px-Spotify_logo_without_text.svg.png'\n )\n\n @cooldown()\n @commands.command()\n @commands.guild_only()\n async def spotify(self, ctx, member: discord.Member = None):\n member = member or ctx.author\n spotify_activity = cast(\n discord.Spotify,\n discord.utils.find(lambda activity: isinstance(activity, discord.Spotify), member.activities),\n )\n if spotify_activity is None:\n address = 'nie słuchasz' if member == ctx.author else f'{member.display_name} nie słucha'\n embed = self.bot.generate_embed('⏹', f'W tym momencie {address} niczego na Spotify')\n else:\n embed = self.bot.generate_embed(\n '▶️',\n spotify_activity.title,\n url=f'https://open.spotify.com/go?uri=spotify:track:{spotify_activity.track_id}',\n )\n embed.set_thumbnail(url=spotify_activity.album_cover_url)\n embed.add_field(name='W wykonaniu', value=', '.join(spotify_activity.artists))\n embed.add_field(name='Z albumu', value=spotify_activity.album)\n embed.add_field(name='Długość', value=human_amount_of_time(spotify_activity.duration.total_seconds()))\n # search for the song on YouTube\n youtube_search_query = f'{spotify_activity.title} {\" \".join(spotify_activity.artists)}'\n try:\n youtube_search_result = self.bot.youtube_client.search(youtube_search_query)\n except:\n capture_exception()\n else:\n # add a link to a YouTube video if a match was found\n if (\n youtube_search_result is not None\n and SequenceMatcher(None, youtube_search_query, youtube_search_result.title).ratio() > 0.25\n ):\n embed.add_field(name='Posłuchaj na YouTube', value=youtube_search_result.url, inline=False)\n embed.set_image(url=youtube_search_result.thumbnail_url)\n embed.set_footer(text=self.FOOTER_TEXT, icon_url=self.FOOTER_ICON_URL)\n await self.bot.send(ctx, embed=embed)\n\n\nasync def setup(bot: Somsiad):\n await 
bot.add_cog(Spotify(bot))\n","repo_name":"Twixes/somsiad","sub_path":"plugins/spotify.py","file_name":"spotify.py","file_ext":"py","file_size_in_byte":2685,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"37"} +{"seq_id":"30848017045","text":"from player import *\r\nfrom pathlib import Path\r\nimport pygame, os\r\n\r\n#Function - Checks Collision\r\ndef collision_check(player, objects, counter):\r\n if player.rect.colliderect(objects.body1_rect):\r\n return True\r\n\r\n# Not sure if i'm going to keep this in\r\ndef front_layer(player, screen_res, surface1, surface2):\r\n surface1.blit(surface2, (0 - player.scroll[0], 0 - player.scroll[1]))\r\n #pygame.draw.circle(surface1, (98,19,18), (0+radius, screen_res[1] - radius), radius, width=0)\r\n #pygame.draw.rect(surface1, player.HP_color, (0, screen_res[1] - 200, player.current_HP, 200))\r\n\r\ndef load_sprite_list(enemy_id, action, player=False):\r\n if player:\r\n relative_path = Path().absolute().as_posix()\r\n sprite_list = [pygame.image.load(\"{}\".format(relative_path) + \"/\" + enemy_id + \"/image/{}\".format(i)).convert_alpha()\r\n for i in os.listdir(\"{}\".format(relative_path) + \"/\" + enemy_id + \"/image/\") if i.startswith(\"{}\".format(action))]\r\n else:\r\n relative_path = Path().absolute().as_posix()\r\n sprite_list = [pygame.image.load(\"{}\".format(relative_path) + \"/boss/\" + enemy_id + \"/image/{}\".format(i)).convert_alpha()\r\n for i in os.listdir(\"{}\".format(relative_path) + \"/boss/\" + enemy_id + \"/image/\") if i.startswith(\"{}\".format(action))]\r\n return sprite_list\r\n\r\n#Function - Normalize width and height so sprites blit pos are synonymous\r\ndef sprite_offset(sprites, sprites2=[]):\r\n x_offset, y_offset = [], []\r\n if len(sprites2) == 0:\r\n for sprite in sprites:\r\n if sprite.get_width() > sprites[0].get_width():\r\n x_offset.append(sprite.get_width() - sprites[0].get_width())\r\n elif sprite.get_width() < sprites[0].get_width():\r\n x_offset.append(-1*(sprites[0].get_width() - sprite.get_width()))\r\n else:\r\n x_offset.append(0)\r\n\r\n for sprite in sprites:\r\n if sprite.get_height() != sprites[0].get_height():\r\n y_offset.append(sprite.get_height() - sprites[0].get_height())\r\n else:\r\n y_offset.append(0)\r\n\r\n return x_offset, y_offset\r\n else:\r\n for sprite in sprites2:\r\n if sprite.get_width() > sprites[0].get_width():\r\n x_offset.append(sprite.get_width() - sprites[0].get_width())\r\n elif sprite.get_width() < sprites[0].get_width():\r\n x_offset.append(-1*(sprites[0].get_width() - sprite.get_width()))\r\n else:\r\n x_offset.append(0)\r\n\r\n for sprite in sprites2:\r\n if sprite.get_height() != sprites[0].get_height():\r\n y_offset.append(sprite.get_height() - sprites[0].get_height())\r\n else:\r\n y_offset.append(0)\r\n\r\n return x_offset, y_offset\r\n","repo_name":"oldmanholmes/Learning-Game-Development-via-Pygame","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7842561382","text":"lang_dict = {\n 'main menu': ['Asosiy menyu 🏠', 'Главное меню 🏠'],\n \n 'take ques': ['🧾 So\\'rovnomada qatnashish', '🧾 Пройти опросник'],\n\n 'change lang': [\"\\U0001F1FA\\U0001F1FF Tilni o'zgartirish \\U0001F1F7\\U0001F1FA\", \"\\U0001F1FA\\U0001F1FF Сменить язык \\U0001F1F7\\U0001F1FA\"],\n\n 'select lang': [\"\"\" Tilni tanlang \"\"\", \"\"\"Выберите язык бота \"\"\"],\n \n 'type name': 
[\"\"\"Ismingizni kiriting \"\"\", \"\"\"Введите ваше имя \"\"\"],\n \n 'send number': [\"\"\"Telefon raqamingizni yuboring \"\"\", \"\"\"Оставьте свой номер телефона \"\"\"],\n\n 'leave number': ['Telefon raqamni yuborish', 'Оставить номер телефона'],\n \n 'back': [\"\"\"🔙 Ortga\"\"\", \"\"\"🔙 Назад\"\"\"],\n \n 'next': [\"\"\"Davom etish ➡️\"\"\", \"\"\"Далее ➡️\"\"\"],\n \n 'click all': [\"\"\"Barcha variantlar uchun javobni tanlang! \"\"\", \"\"\"Обязательный ответ для каждого варианта \"\"\"],\n \n 'if other': [\"\"\"Variantlar ichida bo'lmasa, o'zingiz kiriting \"\"\", \"\"\"Если другой. Уточните __________ \"\"\"],\n \n 'send again': [\"\"\"Variantlar ichidan tanlang \"\"\", \"\"\"Выберите один из вариантов \"\"\"],\n \n 'stop answering': [\"\"\"So'rovnoma yakunlandi. Ishtirok etganingiz uchun raxmat! \"\"\", \"\"\"Опрос завершён. Спасибо за участие! \"\"\"],\n \n 'error date': [\"\"\"Iltimos oy.yil (03.2003) shaklida yozing \"\"\", \"\"\"Напишите, пожалуйста, в формате мм.йййй (03.2003) \"\"\"],\n \n 'city_chirchik': [\"\"\"Chirchiq\"\"\", \"\"\"Чирчик\"\"\"],\n \n 'city_gulistan': [\"\"\"Guliston\"\"\", \"\"\"Гулистан\"\"\"],\n \n 'your answers': [\"\"\"Sizning javoblaringiz:\"\"\", \"\"\"Ваш ответы:\"\"\"],\n \n '': [\"\"\" \"\"\", \"\"\" \"\"\"],\n \n '': [\"\"\" \"\"\", \"\"\" \"\"\"],\n \n '': [\"\"\" \"\"\", \"\"\" \"\"\"],\n \n '': [\"\"\" \"\"\", \"\"\" \"\"\"],\n \n '': [\"\"\" \"\"\", \"\"\" \"\"\"],\n \n '': [\"\"\" \"\"\", \"\"\" \"\"\"],\n \n '': [\"\"\" \"\"\", \"\"\" \"\"\"],\n \n '': [\"\"\" \"\"\", \"\"\" \"\"\"],\n \n '': [\"\"\" \"\"\", \"\"\" \"\"\"],\n \n '': [\"\"\" \"\"\", \"\"\" \"\"\"],\n \n '': [\"\"\" \"\"\", \"\"\" \"\"\"],\n \n \n}","repo_name":"xiidot1303/imzobot","sub_path":"bot/uz_ru.py","file_name":"uz_ru.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2371542640","text":"import numpy as np\nimport time\nimport random\nfrom renderer.rendererFactory import createRenderer\nfrom dataclasses import dataclass\n\n# Kolory przmieszczające się od góry zapętlone\n\n# 1. wczytaj pozycja lampek choinkowych\npoints = np.loadtxt(\"raw.txt\", delimiter=\",\")\n# 2. utówrz renderer\nrenderer = createRenderer()\n\n# 3. 
zaimplementów swój program\n\n\nzMin = points.min(axis=0)[2]\nzMax = points.max(axis=0)[2]\nzH = zMax - zMin\n\nbandsTop = zMin\nstep = (zMax - zMin) / 30\n\n\n@dataclass\nclass Band(object):\n zFrom: float\n zTo: float\n color: list\n\n def contains(self, z):\n return z >= self.zFrom and z < self.zTo\n\n def moveBy(self, step):\n self.zFrom += step\n self.zTo += step\n\nclass AllBand(object):\n\n def __init__(self, size, bandHight):\n self.bands = []\n self.bandHight = bandHight\n for i in range(size):\n self.bands.append(Band(bandsTop - (i + 1) * self.bandHight, bandsTop - (i) * self.bandHight, self.randomColor()))\n\n def randomColor(self):\n r = random.randint(0, 255)\n g = random.randint(0, 255)\n b = random.randint(0, 255)\n return [r, g, b]\n\n def getColor(self, x):\n for b in self.bands:\n if b.contains(x):\n return b.color\n\n return [0, 0, 0]\n\n def moveBy(self, step):\n for b in self.bands:\n b.moveBy(step)\n\n if self.bands[0].zFrom > zMax:\n first = self.bands.pop(0)\n first.zFrom = self.bands[-1].zFrom - self.bandHight\n first.zTo = self.bands[-1].zFrom\n self.bands.append(first)\n\n\nallBands = AllBand(100, zH / 10)\n\nwhile True:\n npColors = np.zeros((len(points), 3))\n\n for i in range(0, len(points)):\n z = points[i][2]\n npColors[i] = allBands.getColor(z)\n\n # 4. wyślij dane na choinkę lub symulator\n renderer.render256(points, npColors)\n\n allBands.moveBy(step)\n","repo_name":"krystiankaluzny/light_designer","sub_path":"anim4.py","file_name":"anim4.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21433818603","text":"from sys import platform\nif platform == 'win32':\n DESKTOPdir = '//Desktop4'\n #DATADESKTOPdir = '//Desktop2'\n DATADESKTOPdir = 'T:/'\n taskdir = DESKTOPdir+'/Dtensorflow\\LiChen\\VW\\PolarReg'\n MODELDESKTOPdir = '//Desktop2'\nelse:\n #ubuntu\n DESKTOPdir = '/mnt/desktop4'\n #DATADESKTOPdir = '/mnt/desktop2'\n DATADESKTOPdir = '/mnt/V'\n taskdir = '/home/li/pycharm'\n MODELDESKTOPdir = '/mnt/desktop2'\n","repo_name":"clatfd/PolarReg","sub_path":"PolarVW/variables.py","file_name":"variables.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18328770427","text":"from base_jsonl_converter import JSONLConverter\nimport masktools\nimport pycocotools.mask as mask\nimport json\n\n\nclass COCOJSONLConverter(JSONLConverter):\n \"\"\"\n Class for converting COCO data for object detection and instance segmentation into jsonl files\n ...\n Attributes\n ---------\n base_url : str\n the base for the image_url to be written into the jsonl file\n coco_file : str\n file containing coco annotations\n compressed_rle : bool\n flag indicating if coco segmentation annotations are stored in comprssed rle format\n \"\"\"\n\n def __init__(self, base_url, coco_file, compressed_rle=False):\n super().__init__(base_url=base_url)\n self.categories = {}\n self.compressed_rle = compressed_rle\n with open(coco_file) as f_in:\n self.coco_data = json.load(f_in)\n self.image_id_to_data_index = {}\n for i in range(0, len(self.coco_data[\"images\"])):\n self.jsonl_data.append({})\n self.jsonl_data[i][\"image_url\"] = \"\"\n self.jsonl_data[i][\"image_details\"] = {}\n self.jsonl_data[i][\"label\"] = []\n for i in range(0, len(self.coco_data[\"categories\"])):\n self.categories[self.coco_data[\"categories\"][i][\"id\"]] = self.coco_data[\n \"categories\"\n ][i][\"name\"]\n\n def 
convert(self):\n \"\"\"\n Generate jsonl data for object detection or instance segmentation\n\n return: list of lines for jsonl\n rtype: List \n\n \"\"\"\n for i in range(0, len(self.coco_data[\"images\"])):\n self._populate_image_url(i, self.coco_data[\"images\"][i])\n self._populate_image_details(i, self.coco_data[\"images\"][i])\n for i in range(0, len(self.coco_data[\"annotations\"])):\n self._populate_label(self.coco_data[\"annotations\"][i])\n return self.jsonl_data\n\n def _populate_image_url(self, index, coco_image):\n \"\"\"\n populates image url for jsonl entry\n\n Parameters:\n index (int): image entry index\n coco_image (dict): image entry from coco data file\n \"\"\"\n image_url = coco_image[\"file_name\"]\n self.jsonl_data[index][\"image_url\"] = (\n self.base_url + image_url[image_url.rfind(\"/\") + 1 :]\n )\n self.image_id_to_data_index[coco_image[\"id\"]] = index\n\n def _populate_image_details(self, index, coco_image):\n \"\"\"\n populates image details for jsonl entry\n\n Parameters:\n index (int): image entry index\n coco_image (dict): image entry from coco data file\n return: list of lines for jsonl\n \"\"\"\n file_name = coco_image[\"file_name\"]\n self.jsonl_data[index][\"image_details\"][\"format\"] = file_name[\n file_name.rfind(\".\") + 1 :\n ]\n self.jsonl_data[index][\"image_details\"][\"width\"] = coco_image[\"width\"]\n self.jsonl_data[index][\"image_details\"][\"height\"] = coco_image[\"height\"]\n\n def _populate_label(self, annotation):\n \"\"\"\n populates label entry for object detection or instance segmentation\n\n Parameters:\n annotation (dict): annotation entry from coco data file\n \"\"\"\n index = self.image_id_to_data_index[annotation[\"image_id\"]]\n image_details = self.jsonl_data[index][\"image_details\"]\n label = {\"label\": self.categories[annotation[\"category_id\"]]}\n # check if object detection or instance segmentation\n if (\n \"segmentation\" not in annotation.keys()\n or len(annotation[\"segmentation\"]) == 0\n ):\n self._populate_bbox_in_label(label, annotation, image_details)\n else:\n self.__populate_segmentation_in_label(label, annotation, image_details)\n self._populate_isCrowd(label, annotation)\n self.jsonl_data[index][\"label\"].append(label)\n\n def _populate_bbox_in_label(self, label, annotation, image_details):\n \"\"\"\n populates bounding box in label entry for object detection\n\n Parameters:\n label (dict): label to populate for jsonl entry\n annotation (dict): annotation entry from coco data file\n \"\"\"\n # if bbox comes as normalized, skip normalization.\n if max(annotation[\"bbox\"]) < 1.5:\n width = 1\n height = 1\n else:\n width = image_details[\"width\"]\n height = image_details[\"height\"]\n label[\"topX\"] = annotation[\"bbox\"][0] / width\n label[\"topY\"] = annotation[\"bbox\"][1] / height\n label[\"bottomX\"] = (annotation[\"bbox\"][0] + annotation[\"bbox\"][2]) / width\n label[\"bottomY\"] = (annotation[\"bbox\"][1] + annotation[\"bbox\"][3]) / height\n\n def __populate_segmentation_in_label(self, label, annotation, image_details):\n \"\"\"\n populates polygon segmentation in label entry for instance segmentation\n\n Parameters:\n label (dict): label to populate for jsonl entry\n annotation (dict): annotation entry from coco data file\n image_details (dict): image details from coco data file\n \"\"\"\n # if bbox comes as normalized, skip normalization.\n if max(annotation[\"bbox\"]) < 1.5:\n width = 1\n height = 1\n else:\n width = image_details[\"width\"]\n height = image_details[\"height\"]\n\n polygons = 
[]\n if (\n type(annotation[\"segmentation\"]) is dict\n ): # segmentations are in uncompressed rle format\n rle = annotation[\"segmentation\"]\n if self.compressed_rle:\n compressed_rle = rle\n else:\n compressed_rle = mask.frPyObjects(rle, rle[\"size\"][0], rle[\"size\"][1])\n polygons = masktools.convert_mask_to_polygon(compressed_rle)\n else: # segmentation is list of vertices\n for segmentation in annotation[\"segmentation\"]:\n polygon = []\n # loop through vertices:\n for id, vertex in enumerate(segmentation):\n if (id % 2) == 0:\n # x-coordinates (even index)\n x = vertex / width\n polygon.append(x)\n\n else:\n y = vertex / height\n polygon.append(y)\n polygons.append(polygon)\n label[\"polygon\"] = polygons\n\n def _populate_isCrowd(self, label, annotation):\n \"\"\"\n populates iscrowd in label entry for object detection and instance segmentation\n\n Parameters:\n label (dict): label to populate for json entry\n annotation (dict): annotation entry from coco data file\n \"\"\"\n if \"iscrowd\" in annotation.keys():\n label[\"isCrowd\"] = annotation[\"iscrowd\"]\n","repo_name":"Azure/azureml-examples","sub_path":"sdk/python/jobs/automl-standalone-jobs/jsonl-conversion/coco_jsonl_converter.py","file_name":"coco_jsonl_converter.py","file_ext":"py","file_size_in_byte":6838,"program_lang":"python","lang":"en","doc_type":"code","stars":1362,"dataset":"github-code","pt":"37"} +{"seq_id":"26875626795","text":"from decimal import Decimal\nfrom pydantic import BaseModel\n\n\nclass PlayerDpsAllStats(BaseModel):\n dps: Decimal\n damage: Decimal\n condiDps: Decimal\n condiDamage: Decimal\n powerDps: Decimal\n powerDamage: Decimal\n breakbarDamage: Decimal\n actorDps: Decimal\n actorDamage: Decimal\n actorCondiDps: Decimal\n actorCondiDamage: Decimal\n actorPowerDps: Decimal\n actorPowerDamage: Decimal\n actorBreakbarDamage: Decimal\n\n\ndef get_stub_player_dps_all_stats(\n dps=52.342,\n damage=52.342,\n condiDps=52.342,\n condiDamage=52.342,\n powerDps=52.342,\n powerDamage=52.342,\n breakbarDamage=52.342,\n actorDps=52.342,\n actorDamage=52.342,\n actorCondiDps=52.342,\n actorCondiDamage=52.342,\n actorPowerDps=52.342,\n actorPowerDamage=52.342,\n actorBreakbarDamage=52.342,\n):\n return PlayerDpsAllStats(\n dps=dps,\n damage=damage,\n condiDps=condiDps,\n condiDamage=condiDamage,\n powerDps=powerDps,\n powerDamage=powerDamage,\n breakbarDamage=breakbarDamage,\n actorDps=actorDps,\n actorDamage=actorDamage,\n actorCondiDps=actorCondiDps,\n actorCondiDamage=actorCondiDamage,\n actorPowerDps=actorPowerDps,\n actorPowerDamage=actorPowerDamage,\n actorBreakbarDamage=actorBreakbarDamage,\n )\n","repo_name":"Ariacell/PyGw2Agg","sub_path":"pygw2agg/models/ei_output/player_dps_all_stats.py","file_name":"player_dps_all_stats.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9110229362","text":"def PhedexQuality_Manual():\n from Monstr.Modules.PhedexQuality.PhedexQuality import PhedexQuality, DB\n test_obj = PhedexQuality()\n # This workaround is required for TravisCI only.\n test_obj.status_schema = {'status': (DB.Column('id', DB.Integer, primary_key=True),\n DB.Column('name', DB.String(64)),\n DB.Column('status', DB.Integer),\n DB.Column('time', DB.DateTime(True)),\n DB.Column('description', DB.Text),)}\n\n test_obj.Initialize()\n params = test_obj.PrepareRetrieve()\n data = test_obj.Retrieve(params)\n test_obj.InsertToDB(data)\n events = test_obj.Analyze(data)\n test_obj.React(data)\n\n\ndef 
test_PhedexQuality_initial():\n PhedexQuality_Manual()\n\n\ndef test_PhedexQuality_update():\n PhedexQuality_Manual()\n\n\ndef test_RESTs():\n from Monstr.Modules.PhedexQuality.PhedexQuality import PhedexQuality\n obj = PhedexQuality()\n obj.Initialize()\n for rest_name in obj.rest_links:\n obj.rest_links[rest_name]({})\n","repo_name":"tier-one-monitoring/monstr","sub_path":"Monstr/Modules/PhedexQuality/test_PhedexQuality.py","file_name":"test_PhedexQuality.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17715673862","text":"try:\n from import_export.admin import ImportExportModelAdmin as ModelAdmin\nexcept ImportError:\n from django.contrib.admin import ModelAdmin\nfrom django.contrib.auth.admin import UserAdmin as DjangoUserAdmin, GroupAdmin as DjangoGroupAdmin\nfrom django.contrib.auth.forms import UserChangeForm as DjangoUserChangeForm, UserCreationForm as DjangoUserCreationForm\nfrom django.contrib.sites.admin import SiteAdmin as DjangoSiteAdmin\nfrom django.contrib import admin\nfrom django import forms\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom ampcms.models import Module, Page, Pagelet, PageletAttribute, User, Group, AmpCmsSite, PageDetails, PageletDetails, ModuleDetails\nfrom ampcms.conf import settings\n\nclass SiteAdmin(DjangoSiteAdmin):\n list_display = ('domain', 'name', 'skin', 'private')\n list_filter = ('private',)\n\nclass UserChangeForm(DjangoUserChangeForm):\n class Meta:\n model = User\n\nclass UserCreationForm(DjangoUserCreationForm):\n class Meta:\n model = User\n\nclass UserAdmin(DjangoUserAdmin):\n fieldsets = (\n (None,\n {'fields': ('username', 'password')}),\n (_('Personal Info'),\n {'fields': ('first_name', 'last_name', 'email')}),\n (_('Page Permissions'),\n {'classes': ('collapse',),\n 'fields': ('acl_pages', 'acl_pagelets')}),\n (_('Other Permissions'),\n {'classes': ('collapse',),\n 'fields': ('is_active', 'is_staff', 'is_superuser', 'user_permissions')}),\n (_('Important Dates'),\n {'classes': ('collapse',),\n 'fields': ('last_login', 'date_joined')}),\n (_('Groups'),\n {'fields': ('groups',)}),\n )\n filter_horizontal = ('user_permissions', 'acl_pages', 'acl_pagelets', 'groups')\n form = UserChangeForm\n add_form = UserCreationForm\n\nclass GroupAdmin(DjangoGroupAdmin):\n filter_horizontal = ('permissions', 'acl_pages', 'acl_pagelets')\n\nclass ModuleDetailsInline(admin.TabularInline):\n model = ModuleDetails\n extra = 0\n\nclass PageDetailsInline(admin.TabularInline):\n model = PageDetails\n extra = 0\n\nclass PageletDetailsInline(admin.TabularInline):\n model = PageletDetails\n extra = 0\n\nclass PageInline(admin.TabularInline):\n model = Page\n extra = 0\n readonly_fields = ['admin_link']\n \nclass PageletInline(admin.TabularInline):\n model = Pagelet\n extra = 0\n readonly_fields = ['admin_link']\n exclude = ['classes']\n\nclass PageletAttributeInline(admin.TabularInline):\n model = PageletAttribute\n\nclass ModuleForm(forms.ModelForm):\n model = Module\n\nclass ModuleAdmin(ModelAdmin):\n fieldsets = (\n ('Names', {'fields': ('name', 'icon')}),\n ('Other', {'fields': ('site', 'order', 'active', 'show_in_navigation')}),\n ('Redirects', {'classes': ('collapse',),\n 'fields': ('redirect_module', 'redirect_url')}))\n list_display = ('name', 'title', 'active', 'order', 'site', 'view_on_site')\n list_filter = ('active', 'site')\n list_editable = ('order',)\n inlines = [ModuleDetailsInline, PageInline]\n actions = ['activate', 
'deactivate']\n ordering = ['site__domain', 'name']\n \n class Media:\n js = (\n 'https://ajax.googleapis.com/ajax/libs/jquery/1.7.0/jquery.min.js',\n 'https://ajax.googleapis.com/ajax/libs/jqueryui/1.8.16/jquery-ui.min.js',)\n css = {\n 'all': ('ampcms/css/admin-extended.css',)\n }\n \n def activate(self, request, queryset):\n rows_updated = queryset.update(active=True)\n if rows_updated == 1:\n msg = '1 module was activated'\n else:\n msg = '%s modules were activated' % rows_updated\n self.message_user(request, msg)\n activate.short_description = 'Activate selected modules'\n \n def deactivate(self, request, queryset):\n rows_updated = queryset.update(active=False)\n if rows_updated == 1:\n msg = '1 module was activated'\n else:\n msg = '%s modules were activated' % rows_updated\n self.message_user(request, msg)\n deactivate.short_description = 'Deactivate selected modules'\n \n def view_on_site(self, obj):\n if obj.active:\n return 'View on Site' % (obj.site, obj.name)\n else:\n return ''\n view_on_site.allow_tags = True\n\nclass PageAdmin(ModelAdmin):\n fieldsets = (\n ('Names', {'fields': ('name', 'icon')}),\n ('Objects', {'fields': ('module', 'page_class',)}),\n ('Other', {'fields': ('order', 'private', 'active', 'show_in_navigation')}))\n list_display = ('full_name', 'name', 'title', 'site', 'module', 'page_class', 'active', 'order', 'view_on_site')\n list_filter = ('active', 'module__site', 'module', 'page_class')\n list_editable = ('order',)\n inlines = [PageDetailsInline, PageletInline]\n actions = ['activate', 'deactivate']\n ordering = ['module__site__domain', 'module__name', 'name']\n \n class Media:\n js = (\n 'https://ajax.googleapis.com/ajax/libs/jquery/1.7.0/jquery.min.js',\n 'https://ajax.googleapis.com/ajax/libs/jqueryui/1.8.16/jquery-ui.min.js',)\n css = {\n 'all': ('ampcms/css/admin-extended.css',)\n }\n \n def site(self, obj):\n return obj.module.site\n site.short_description = 'Site'\n \n def activate(self, request, queryset):\n rows_updated = queryset.update(active=True)\n if rows_updated == 1:\n msg = '1 page was activated'\n else:\n msg = '%s pages were activated' % rows_updated\n self.message_user(request, msg)\n activate.short_description = 'Activate selected pages'\n \n def deactivate(self, request, queryset):\n rows_updated = queryset.update(active=False)\n if rows_updated == 1:\n msg = '1 page was activated'\n else:\n msg = '%s pages were activated' % rows_updated\n self.message_user(request, msg)\n deactivate.short_description = 'Deactivate selected pages'\n \n def full_name(self, obj):\n return '%s.%s' % (obj.module.name, obj.name)\n full_name.short_description = 'Page'\n \n def view_on_site(self, obj):\n if obj.active:\n return 'View on Site' % (obj.module.site, obj.module.name, obj.name)\n else:\n return ''\n view_on_site.allow_tags = True\n\nclass PageletAdmin(ModelAdmin):\n fieldsets = (\n ('Names', {'fields': ('name',)}),\n ('Objects', {'fields': ('page', 'pagelet_class', 'application', 'starting_url', 'classes')}),\n ('Other', {'fields': ('order', 'active')}))\n list_display = ('full_name', 'name', 'title', 'page', 'active', 'pagelet_class', 'order')\n list_filter = ('active', 'page__module__site', 'page__module', 'page', 'pagelet_class')\n inlines = [PageletDetailsInline, PageletAttributeInline]\n actions = ['activate', 'deactivate']\n ordering = ['page__module__site__domain', 'page__module__name', 'page__name', 'name']\n \n class Media:\n css = {\n 'all': ('ampcms/css/admin-extended.css',)\n }\n \n def full_name(self, obj):\n return '%s.%s.%s' % 
(obj.page.module.name, obj.page.name, obj.name)\n full_name.short_description = 'Pagelet'\n \n def activate(self, request, queryset):\n rows_updated = queryset.update(active=True)\n if rows_updated == 1:\n msg = '1 pagelet was activated'\n else:\n msg = '%s pagelets were activated' % rows_updated\n self.message_user(request, msg)\n activate.short_description = 'Activate selected pagelets'\n \n def deactivate(self, request, queryset):\n rows_updated = queryset.update(active=False)\n if rows_updated == 1:\n msg = '1 pagelet was activated'\n else:\n msg = '%s pagelets were activated' % rows_updated\n self.message_user(request, msg)\n deactivate.short_description = 'Deactivate selected pagelets'\n\nif settings.AMPCMS_WYSIWYG == 'ckeditor':\n from ckeditor.widgets import CKEditorWidget\n class PageletAdminForm(forms.ModelForm):\n content = forms.CharField(widget=CKEditorWidget(), required=False)\n class Meta:\n model = Pagelet\n PageletAdmin.form = PageletAdminForm\n\nadmin.site.register(User, UserAdmin)\nadmin.site.register(Group, GroupAdmin)\nadmin.site.register(AmpCmsSite, SiteAdmin)\nadmin.site.register(Module, ModuleAdmin)\nadmin.site.register(Page, PageAdmin)\nadmin.site.register(Pagelet, PageletAdmin)","repo_name":"dwatkinsweb/amp-cms","sub_path":"ampcms/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":8572,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"70602408748","text":"# prova algoritmo RANSAC su un po' di dati\nfrom __future__ import division, print_function\n\nimport matplotlib.pyplot as plt\nfrom scipy.io import readsav\nimport numpy as np\n\nplt.ion()\n\nimport sys \n#import MDSplus as mds\n\nfrom scipy.optimize import curve_fit\n\ndef fit_func( rho_f, rho_0, te_ext, delta_te, grad_te ) :\n\talpha = grad_te / delta_te\n\tte = te_ext + 0.5*delta_te * ( 1 + np.tanh( alpha*(rho_f - rho_0) ) )\n\treturn te\n\n# funzione di voto del ransac\n#def eval_func( rho_f, te_f, inlier_threshold, rho_0, te_ext, delta_te, grad_te ) :\n\t#alpha = grad_te / delta_te\n\t#delta_te = te_f - ( te_ext + 0.5*delta_te * ( 1 + np.tanh( alpha*(rho_f - rho_0) ) ) )\n\t#inliers = np.abs( delta_te ) < inlier_threshold\n\t#return np.count_nonzero( inliers )\n\ndef ransac_te( rho_f, te_f, fit_fn, p0, min_inliers=12, samples_to_fit=10,\n\t\t inlier_threshold=50., max_iters=100 ) : \n\t\n\tbest_pfit = None\n\tbest_model_performance = 0\n\t\n\tnum_samples = rho_f.shape[0]\n\t\n\tfor i in range(max_iters):\n\t\t# model fit\n\t\tsample = np.random.choice( num_samples, size=samples_to_fit, replace=False )\n\t\ttry:\n\t\t\tpfit, pcov = curve_fit( fit_func, rho_f[sample], te_f[sample], p0=p0 )\n\t\texcept :\n\t\t\tprint( i, \"sample fit failed\" )\n\t\t\tcontinue\n\t\t\n\t\t# model performance\n\t\t# model_performance = evaluate_fn( rho_f, te_f, inlier_threshold, p_fit )\n\t\tdelta_te = te_f - fit_func( rho_f, *pfit )\n\t\tinliers = np.abs( delta_te ) < inlier_threshold\n\t\tmodel_performance = np.count_nonzero( inliers )\n\t\t\n\t\t# print( i, model_performance )\n\t\tif model_performance < min_inliers :\n\t\t\tcontinue\n\t\t\n\t\tif model_performance > best_model_performance :\n\t\t\tbest_pfit = pfit\n\t\t\tbest_inliers = inliers\n\t\t\tbest_sample = sample\n\t\t\tbest_model_performance = model_performance\n\t\n\treturn best_pfit, best_inliers, best_sample\n\n# -----------------------------------------------------------------------------\n\ndata_dir = '/scratch/gobbin/rigoni/'\n\n\nshot_list = [ 30808 ]\n\nfig = plt.figure( 
'RANSAC_2' )\nfig.set_size_inches( 6, 4.5 )\n\nfor shot in shot_list :\n\tprint( shot )\n\t\n\tif ( shot < 15600 or shot > 39391 ) :\n\t\tprint( \"invalid shot num: \", shot )\n\t\tcontinue\n\t\n\tfile = 'dsx3_%d.sav' % shot\n\tprint( file )\n\ttry:\n\t\tx = readsav( data_dir+file, python_dict=False ).st\n\t\t# x = readsav( data_dir+file, python_dict=True )\n\texcept:\n\t\tprint( \"invalid file: \", file )\n\t\tsys.exit(0)\n\t\n\tn_qsh = x.n_qsh[0]\n\tt_qsh_begin = np.atleast_1d( x.t1_arr[0]*1E-3 )\n\tt_qsh_end = np.atleast_1d( x.t2_arr[0]*1E-3 )\n\n\tt_min = t_qsh_begin[0]\n\tt_max = t_qsh_end[-1]\n\n\tqshs = []\n\tfor i_qsh in range( n_qsh ) :\n\t\tqsh_name = 'T%02d' % i_qsh\n\t\tqshs.append( x[qsh_name][0] )\n\n\ti_qsh = 0\n\tn_times = qshs[i_qsh].tempi[0].shape[0] \n\tprint( n_times )\n\t\n\tfor i_time in np.arange( n_times ) :\n\t\ttf = qshs[i_qsh].tempi[0][i_time]\n\n\t\t# label tempo in decimi di ms\n\t\ttttt = np.int( tf*1E4 )\n\n\t\tcase_label = r'%5d_%04d' % ( shot, tttt )\n\n\t\tte_ok = qshs[i_qsh].te3[0][i_time,:] > 0\n\n\t\tte = qshs[i_qsh].te3[0][i_time,te_ok]\n\t\trho = qshs[i_qsh].rho3[0][i_time,te_ok]\n\t\t# rb = qshs[i_qsh].prel3[0][i_time,te_ok]\n\n\t\tprint( qshs[i_qsh].tcentro[0][i_time] )\n\t\tprint( qshs[i_qsh].tbordo[0][i_time] )\n\t\tprint( qshs[i_qsh].grad2[0][i_time] )\n\t\tprint( qshs[i_qsh].pos2[0][i_time] )\n\n\t\t# -----------------------------------------------------------------------------\n\t\t\n\t\tk_ext = rho > 0.4\n\n\t\tte_ext_ini = te[k_ext].mean()\n\t\tdelta_te_ini = te.max() - te_ext_ini\n\n\t\trho_0_ini = 0.2\n\t\tgrad_te_ini = -5000.\n\t\tfig = plt.figure( 'RANSAC_2' )\n\t\tfig.clf()\n\t\t# ax2 = plt.subplot( 122 )\n\t\tax2 = plt.gca()\n\n\t\tax2.plot( rho, te, '.' )\n\t\tax2.set_ylabel( r'Te' )\n\t\tax2.set_xlabel( r'$\\rho$' )\n\t\tax2.set_title( case_label )\n\t\t\n\t\talpha_ini = grad_te_ini / delta_te_ini\n\t\txp = np.linspace( 0., np.ceil( rho.max()*10. )/10. 
)\n\t\typ = te_ext_ini + 0.5*delta_te_ini * ( 1 + np.tanh( alpha_ini*(xp - rho_0_ini) ) )\n\t\tax2.plot( xp, yp )\n\n\t\tp0 = ( rho_0_ini, te_ext_ini, delta_te_ini, grad_te_ini )\n\t\ttry:\n\t\t\tpfit, pcov = curve_fit( fit_func, rho, te, p0=p0 )\n\t\t\tax2.plot( xp, fit_func( xp, *pfit ), 'b' )\n\t\texcept :\n\t\t\tprint( case_label, \" VERY BAD!\" )\n\t\t\tplt.pause(0.8)\n\t\t\tcontinue\n\t\t\n\t\ttry:\n\t\t\tbfit, inliers, sample = ransac_te( rho, te, fit_func, p0, min_inliers=10, samples_to_fit=10,\n\t\t\t\tinlier_threshold=50., max_iters=100 )\n\n\t\t\tax2.plot( xp, fit_func( xp, *bfit ), 'r' )\n\t\t\tax2.plot( rho[inliers], te[inliers], 'x' )\n\t\texcept:\n\t\t\tprint( case_label, \" VERY BAD!\" )\n\t\t\tplt.pause(0.8)\n\t\t\t\n\t\tplt.pause(0.8)\n\n\t\t\n\t\t\n","repo_name":"AndreaRigoni/rfx-hunch","sub_path":"src/Tprofile_read/ransac/ransac_prova_2.py","file_name":"ransac_prova_2.py","file_ext":"py","file_size_in_byte":4382,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"72919297388","text":"import time\nfrom random import randint\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\n\ndriver = webdriver.Firefox()\n\ndriver.get(\"http://www.python.org\")\n\nassert \"Python\" in driver.title\n\nt = randint(10,15)\n\ntime.sleep(t)\n\nelement = driver.find_element_by_name(\"q\")\n\nelement.clear() #for clearing the search field\n\nelement.send_keys(\"pycon\") #giving input to search key \"pycon\"\n\nelement.send_keys(Keys.RETURN) # returning result\n\nassert \"No results found.\" not in driver.page_source #if there is nothing in return\n\ntime.sleep(t)\n\ndriver.close()","repo_name":"Imran4424/Selenium-with-Python","sub_path":"Intro.py","file_name":"Intro.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6760307135","text":"import uuid\nfrom django.db import models\n\nfrom django.contrib.auth.models import User\nfrom next_crm.models.company import Company\nfrom next_crm.models.product.product_category import Product_category\nfrom next_crm.models.product.product_unit_of_measure import Product_unit_of_measure\nfrom next_crm.models.product.product_taxes import Product_taxes\nfrom next_crm.models.opportunity.sales_channel import SalesChannel\n\n\nclass Product_template(models.Model):\n\n\n\tid = models.AutoField(primary_key=True)\n\tuuid \t\t\t\t = models.UUIDField(default=uuid.uuid4, editable=False)\n\tname = models.CharField(max_length=255, blank=True )\n\tcan_be_sold = models.BooleanField(default=True, db_index=True)\n\tcan_be_purchased = models.BooleanField(default=True, db_index=True)\n\tcan_be_expended = models.BooleanField(default=False, db_index=True)\n\tevent_subscription = models.BooleanField(default=False, db_index=True)\n\n\tproduct_type = models.CharField(max_length=50, blank=True,null=True,db_index=True)\n\tproduct_category = models.ForeignKey(Product_category , models.SET_NULL, db_index=True, blank=True,null=True, )\n\tsale_price = models.FloatField(null=True)\n\tuofm = models.ForeignKey(Product_unit_of_measure , models.SET_NULL,related_name='unit_of_measure', db_index=True, blank=True,null=True, )\n\tpurchase_uofm = models.ForeignKey(Product_unit_of_measure , models.SET_NULL,related_name='purchase_unit_of_measure', db_index=True, blank=True,null=True, )\n\tdescription = models.TextField(blank=True,null=True)\n\ttax_on_sale = models.ForeignKey(Product_taxes , models.SET_NULL, 
related_name='tax_onsale', db_index=True, blank=True,null=True, )\n\twholesale_tax = models.ForeignKey(Product_taxes , models.SET_NULL, related_name='tax_onwholesale', db_index=True, blank=True,null=True, )\n\tnotes = models.TextField(blank=True,null=True)\n\tvendors_notes = models.TextField(blank=True,null=True)\n\tcreate_by_user = models.ForeignKey(User, models.SET_NULL,related_name='create_by', db_index=True, blank=True,null=True, )\n\tupdate_by_user = models.ForeignKey(User, models.SET_NULL,related_name='update_by', db_index=True, blank=True,null=True, )\n\tcompany \t\t\t = models.ForeignKey(Company, db_index=True, on_delete=models.CASCADE, blank=False, null=False)\n\tcreated_at = models.DateTimeField(auto_now_add= True)\n\tupdated_at = models.DateTimeField(auto_now= True )\n\tstate = models.BooleanField(default=True);\n\tactive = models.BooleanField(default=True);\n\tweight = models.FloatField(null=True)\n\twarrnety = models.FloatField(null=True)\n\tcolor = models.CharField(max_length=255, blank=True )\n\tvolume = models.FloatField(null=True)\n\tsales_channel = models.ForeignKey(SalesChannel, models.SET_NULL, db_index=True, blank=True,null=True)\n\n\n\n\n\tdef __str__(self):\n\t\treturn self.name\n\t\n\tclass Meta:\n\t\tapp_label = 'next_crm'\n","repo_name":"ambre1pravin/django-react","sub_path":"next_crm/models/product/product_template.py","file_name":"product_template.py","file_ext":"py","file_size_in_byte":3044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15242533494","text":"\r\n#========Excel operation-====================\r\n\"\"\"\r\nReading Data from excel file\r\nwriting data into excel file\r\ndata driven test case\r\n\r\n\"\"\"\r\nimport openpyxl\r\n\r\npath=\"C:\\\\Users\\kandeepx\\Documents\\B\\MM.xlsx\"\r\n\r\nworkbook=openpyxl.load_workbook(path)\r\nsheet=workbook.get_sheet_by_name(\"High\") #excel will have multiple sheets so to specify the sheet we r using this command or else we can use \"active\"\r\nrows=sheet.max_row\r\ncols=sheet.max_column\r\n\r\nprint(\"High sheet has below rows and cols :\")\r\nprint(rows)\r\nprint(cols)\r\n\r\n\r\nfor r in range(1,rows+1):\r\n for c in range(1,cols+1):\r\n print(sheet.cell(row=r,column=c).value,end=\" \")\r\n\r\n print()\r\n\r\n\r\n#==========Writing Excel==========\r\n\"\"\"\r\npath=\"C:\\\\Users\\kandeepx\\Documents\\B\\\\test.xlsx\"\r\nworkbook=openpyxl.load_workbook(path)\r\nsheet=workbook.active\r\n\r\nfor r in range(1,7):\r\n for c in range(1,5):\r\n sheet.cell(row=r,column=c).value=\"welcome\"\r\n\r\nworkbook.save(path)\r\n\"\"\"\r\n\r\n\r\n\r\n","repo_name":"kandeepanveera/Selenium","sub_path":"Data Driver Excel_Selenium.py","file_name":"Data Driver Excel_Selenium.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5897766702","text":"# Advent of Code 2020 Day 18\n# https://adventofcode.com/2020/18\n\n\nfrom collections import namedtuple\nfrom typing import List\n\n\n# Part 1\n# Grammar:\n# Expression = \"(\" Expression \")\" |\n# Expression Operator Number |\n# Number\n# Operator = \"+\" | \"*\"\n# Number = int()\n\nToken = namedtuple(\"Token\", \"type, value\", defaults=[None])\nToken.__repr__ = lambda self: f\"{self.type} {self.value}\"\n\n\ndef tokenize(raw_expr):\n\n if raw_expr == \"\":\n return None\n\n expr = raw_expr.split()\n if len(expr) > 1:\n tokens = []\n for e in expr:\n tokens.extend(tokenize(e))\n return tokens\n\n if raw_expr[0] == 
\"(\":\n return [Token(\"OpenParen\")] + tokenize(raw_expr[1:])\n elif raw_expr[-1] == \")\":\n return tokenize(raw_expr[:-1]) + [Token(\"CloseParen\")]\n elif raw_expr[0] == \"+\":\n return [Token(\"Operator\", \"Add\")]\n elif raw_expr[0] == \"*\":\n return [Token(\"Operator\", \"Multiply\")]\n elif raw_expr[0].isdigit():\n return [Token(\"Number\", int(raw_expr))]\n\n\ndef parse(tokens: List[Token]):\n # Recursive\n # Base case\n if len(tokens) == 0:\n return None\n if len(tokens) == 1:\n if type(tokens[0]) == int:\n return tokens[0]\n elif tokens[0].type == \"Number\":\n return tokens[0].value\n\n if tokens[-2].type == \"Operator\":\n operand2 = tokens[-1] if type(tokens[-1]) is int else tokens[-1].value\n if tokens[-2].value == \"Add\":\n return parse(tokens[:-2]) + operand2\n elif tokens[-2].value == \"Multiply\":\n return parse(tokens[:-2]) * operand2\n\n if tokens[-1].type == \"CloseParen\":\n paren_count = 1\n index = len(tokens) - 1\n while paren_count != 0:\n index -= 1\n if tokens[index].type == \"OpenParen\":\n paren_count -= 1\n elif tokens[index].type == \"CloseParen\":\n paren_count += 1\n return parse(tokens[:index] + [parse(tokens[index + 1 : -1])])\n\n\n# Part 2 - Addition has precedence over multiplication\n# Grammar:\n# Expression = \"(\" Expression \")\" |\n# Expression Multiply Number |\n# Number\n# Multiply = Expression \"*\" Expression |\n# Add \"*\" Expression\n# Add = Expression \"+\" Expression\n# Number = int()\n\n# if parens, parse those\n# if adds, parse those\n# if muls, parse those\n\n\ndef parse_part2(tokens: List[Token]):\n # Recursive\n # Base case\n if type(tokens) is int:\n return tokens\n if len(tokens) == 0:\n raise RuntimeError()\n if len(tokens) == 1:\n if type(tokens[0]) is int:\n return tokens[0]\n if tokens[0].type == \"Number\":\n return tokens[0].value\n\n if Token(\"OpenParen\") in tokens:\n open_paren_index = tokens.index(Token(\"OpenParen\"))\n paren_count = 1\n close_paren_index = open_paren_index\n while paren_count != 0:\n close_paren_index += 1\n if tokens[close_paren_index].type == \"OpenParen\":\n paren_count += 1\n elif tokens[close_paren_index].type == \"CloseParen\":\n paren_count -= 1\n return parse_part2(\n tokens[:open_paren_index]\n + [parse_part2(tokens[open_paren_index + 1 : close_paren_index])]\n + tokens[close_paren_index + 1 :]\n )\n\n elif Token(\"Operator\", \"Multiply\") in tokens:\n mul_index = tokens.index(Token(\"Operator\", \"Multiply\"))\n return parse_part2(\n parse_part2(tokens[:mul_index]) * parse_part2(tokens[mul_index + 1 :])\n )\n elif Token(\"Operator\", \"Add\") in tokens:\n add_index = tokens.index(Token(\"Operator\", \"Add\"))\n return parse_part2(\n parse_part2(tokens[:add_index]) + parse_part2(tokens[add_index + 1 :])\n )\n\n\ndef test_tokenize():\n assert tokenize(\"\") == None\n assert tokenize(\"1\") == [Token(\"Number\", 1)]\n assert tokenize(\"15\") == [Token(\"Number\", 15)]\n assert tokenize(\"154\") == [Token(\"Number\", 154)]\n assert tokenize(\"1 + 2\") == [\n Token(\"Number\", 1),\n Token(\"Operator\", \"Add\"),\n Token(\"Number\", 2),\n ]\n assert tokenize(\"(1 + 2)\") == [\n Token(\"OpenParen\"),\n Token(\"Number\", 1),\n Token(\"Operator\", \"Add\"),\n Token(\"Number\", 2),\n Token(\"CloseParen\"),\n ]\n\n\nif __name__ == \"__main__\":\n filename = \"./AdventOfCode/2020/day18-input.txt\"\n # filename = \"./AdventOfCode/2020/day18-example1-input.txt\"\n\n with open(filename) as f:\n lines = [line.rstrip() for line in f]\n # print(lines)\n # for line in lines:\n # print(line)\n # 
print(lines[0])\n # print(tokenize(lines[0]))\n # print(parse(tokenize(lines[0])))\n # print(tokenize(lines[1]))\n # print(parse(tokenize(lines[1])))\n\n part1 = sum([parse(tokenize(line)) for line in lines])\n print(f\"Part 1: {part1}\") # 209335026987\n\n part2 = sum([parse_part2(tokenize(line)) for line in lines])\n print(f\"Part 2: {part2}\") # 33331817392479\n","repo_name":"benhunter/coding-challenges","sub_path":"AdventOfCode/2020/day18.py","file_name":"day18.py","file_ext":"py","file_size_in_byte":5023,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"30221335908","text":"import copy\nimport time\nimport torch\nimport argparse\nfrom models.models import *\nfrom torch.utils.data import DataLoader\nfrom utils.load_data import ListDataSet\nfrom utils.transforms import *\nfrom torch import nn,optim\n\ndef train_model(model, device,dataloaders, criterion, optimizer, num_epochs, is_inception=False):\n since = time.time()\n\n val_acc_history = []\n train_acc_history=[]\n \n val_loss_history=[]\n train_loss_history=[]\n \n best_model_wts = copy.deepcopy(model.state_dict())\n best_acc = 0.0\n\n for epoch in range(num_epochs):\n print('Epoch {}/{}'.format(epoch, num_epochs - 1))\n print('-' * 10)\n\n # Each epoch has a training and validation phase\n for phase in ['train', 'val']:\n if phase == 'train':\n model.train() # Set model to training mode\n else:\n model.eval() # Set model to evaluate mode\n\n running_loss = 0.0\n running_corrects = 0\n\n # Iterate over data.\n for inputs, labels in dataloaders[phase]:\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward\n # track history if only in train\n with torch.set_grad_enabled(phase == 'train'):\n # Get model outputs and calculate loss\n # Special case for inception because in training it has an auxiliary output. 
In train\n # mode we calculate the loss by summing the final output and the auxiliary output\n # but in testing we only consider the final output.\n if is_inception and phase == 'train':\n # From https://discuss.pytorch.org/t/how-to-optimize-inception-model-with-auxiliary-classifiers/7958\n outputs, aux_outputs = model(inputs)\n loss1 = criterion(outputs, labels)\n loss2 = criterion(aux_outputs, labels)\n loss = loss1 + 0.4*loss2\n else:\n outputs = model(inputs)\n loss = criterion(outputs, labels)\n\n _, preds = torch.max(outputs, 1)\n\n # backward + optimize only if in training phase\n if phase == 'train':\n loss.backward()\n optimizer.step()\n\n # statistics\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n\n epoch_loss = running_loss / len(dataloaders[phase].dataset)\n epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)\n\n print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))\n\n # deep copy the model\n if phase == 'val' and epoch_acc > best_acc:\n best_acc = epoch_acc\n best_model_wts = copy.deepcopy(model.state_dict())\n if phase == 'val':\n val_acc_history.append(epoch_acc)\n val_loss_history.append(epoch_loss)\n if phase == 'train':\n train_acc_history.append(epoch_acc)\n train_loss_history.append(epoch_loss)\n\n print()\n model.history={\"train_acc\":train_acc_history,\"train_loss\":train_loss_history,\"val_acc\":val_acc_history,\"val_loss\":val_loss_history}\n time_elapsed = time.time() - since\n print('{} Training complete in {:.0f}m {:.0f}s'.format(model_name,time_elapsed // 60, time_elapsed % 60))\n print('Best val Acc: {:4f}'.format(best_acc))\n\n # load best model weights\n model.load_state_dict(best_model_wts)\n torch.save(model,\"output/{}_{:.2f}.pkl\".format(model_name,best_acc))\n return model\n\nif __name__==\"__main__\":\n \n parser = argparse.ArgumentParser()\n parser.add_argument(\"--epochs\", type=int, default=20, help=\"number of epochs\")\n parser.add_argument(\"--batch_size\", type=int, default=32, help=\"size of each image batch\")\n parser.add_argument(\"--model_name\", type=str, default=\"resnet50\",nargs='+',help=\"model to train\")\n parser.add_argument(\"--pretrained\", type=bool, default=False,help=\"if use pretrained weights\")\n parser.add_argument(\"--feature_extract\", type=bool, default=False,help=\"if freeze some layers\")\n parser.add_argument(\"--n_cpu\", type=int, default=8, help=\"number of cpu threads to use during batch generation\")\n parser.add_argument(\"--img_size\", type=int, default=224, help=\"size of each image dimension\")\n # parser.add_argument(\"--checkpoint_interval\", type=int, default=1, help=\"interval between saving model weights\")\n # parser.add_argument(\"--evaluation_interval\", type=int, default=1, help=\"interval evaluations on validation set\")\n opt = parser.parse_args()\n print(opt)\n \n # load data\n data_transforms=get_transforms()\n train_data=ListDataSet(\"train\",\"data/custom/train.txt\",transforms=data_transforms[\"train\"],)\n val_data=ListDataSet(\"val\",\"data/custom/val.txt\",data_transforms[\"val\"])\n \n trainloader = DataLoader(train_data, batch_size=opt.batch_size, shuffle=True,num_workers=opt.n_cpu)\n valloader=DataLoader(val_data, batch_size=opt.batch_size, shuffle=True,num_workers=4)\n dataloader={\"train\":trainloader,\"val\":valloader}\n his=[]\n for model_name in opt.model_name:\n # release gpu's memory\n torch.cuda.empty_cache()\n \n # initialize model\n model=None\n input_size=0\n 
model,input_size=initialize_model(\n model_name,\n num_classes=9,\n feature_extract=opt.feature_extract, \n use_pretrained=opt.pretrained\n )\n \n # train model\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n model = model.to(device)\n params_to_update = model.parameters()\n criterion = nn.CrossEntropyLoss()\n # optimizer=optim.SGD(params_to_update, lr=0.01, momentum=0.9)\n optimizer = optim.Adam(params_to_update,lr=0.001)\n model=train_model(model, device,dataloader, criterion, optimizer, num_epochs=opt.epochs, is_inception=(model_name==\"inception\"))\n his.append(model.history)\n ","repo_name":"recusant7/Pytorch_Image_classification","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6354,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"37442154021","text":"from dotenv import load_dotenv\nload_dotenv()\nimport os\nimport numpy as np\n\nfrom blizz import *\n\nimport pandas as pd\nfrom gspread_pandas import Spread\nfrom gspread_pandas.conf import get_config\nfrom gspread_formatting import *\nfrom ratelimit import limits, sleep_and_retry\n\nimport json\nimport time\n\nwowClasses = [\n '',\n 'Warrior',\n 'Paladin',\n 'Hunter',\n 'Rogue',\n 'Priest',\n 'Death Knight',\n 'Shaman',\n 'Mage',\n 'Warlock',\n 'Monk',\n 'Druid',\n 'Demon Hunter'\n]\n\narmour = [\n \"Name\",\n \"Class\",\n \"Head\",\n \"Shoulder\",\n \"Chest\",\n \"Wrist\",\n \"Hand\",\n \"Waist\",\n \"Legs\",\n \"Feet\"\n]\n\narmourCape = armour.copy()\narmourCape.insert(4,\"Back\")\nmisc = [\n \"Name\",\n \"Class\",\n \"Neck\",\n \"Ring\"\n]\n\nstart = time.perf_counter()\n\nclass AltClass(object):\n def __init__(self,altId,altName,altRealm,altClass,altRank,altProfession1=None,altProfession1Data=None,altProfession2=None,altProfession2Data=None): \n DEFAULT_PROF='Missing'\n self.altId=altId\n self.altName=altName\n self.altRealm=altRealm\n self.altClass=altClass\n self.altRank=altRank\n self.altProfession1=altProfession1 if altProfession1 is not None else DEFAULT_PROF\n self.altProfession1Data=altProfession1Data if altProfession1Data is not None else DEFAULT_PROF\n self.altProfession2=altProfession2 if altProfession2 is not None else DEFAULT_PROF\n self.altProfession2Data=altProfession2Data if altProfession2Data is not None else DEFAULT_PROF\n\ntry:\n BLIZZ_CLIENT=os.getenv(\"BLIZZ_CLIENT\")\n BLIZZ_SECRET=os.getenv(\"BLIZZ_SECRET\")\n SPREADSHEET_KEY=os.getenv(\"SPREADSHEET_KEY\")\n REALM=os.getenv(\"REALM\")\n GUILD=os.getenv(\"GUILD\")\n # RANKS=os.getenv(\"RANKS\").split(\".\")\nexcept KeyError:\n print(\"Environment variables not set correctly\") \n\ntrackedAlts = []\nlegCloth = []\nlegLeather = []\nlegMail = []\nlegPlate = []\nlegJewel = []\n\ntoken = getToken(BLIZZ_CLIENT,BLIZZ_SECRET)\nroster = getRoster(token,REALM,GUILD)\nrosterSize = len(roster)\n\nfor counter, alt in enumerate(roster, start = 1):\n print(str(counter) + \"/\" + str(rosterSize))\n tempAlt = AltClass(\n alt['character']['id'],\n alt['character']['name'],\n alt['character']['realm']['slug'],\n wowClasses[alt['character']['playable_class']['id']],\n alt['rank']\n )\n professions = getProfessions(token,alt['character']['name'],alt['character']['realm']['slug'])\n if professions != None:\n for counter, prof in enumerate(professions):\n if counter == 0:\n tempAlt.altProfession1 = prof['profession']['name']\n for expac in prof['tiers']:\n if \"Shadowlands\" in expac['tier']['name']:\n tempAlt.altProfession1Data = expac['known_recipes']\n 
elif counter == 1:\n tempAlt.altProfession2 = prof['profession']['name']\n for expac in prof['tiers']:\n if \"Shadowlands\" in expac['tier']['name']:\n tempAlt.altProfession2Data = expac['known_recipes']\n trackedAlts.append(tempAlt)\n\n# Find a workbook by name and open the first sheet\n# Make sure you use the right name here.\nspread = Spread(SPREADSHEET_KEY,config=get_config(os.getcwd() + '\\\\legtracker','client_secret.json'))\n\nfor alt in trackedAlts:\n # CLOTH LEGGO\n if ((alt.altProfession1 == \"Tailoring\") and (alt.altProfession1Data != \"Missing\")) or ((alt.altProfession2 == \"Tailoring\") and (alt.altProfession2Data != \"Missing\")):\n if alt.altProfession1 == \"Tailoring\":\n searchData = alt.altProfession1Data\n elif alt.altProfession2 == \"Tailoring\":\n searchData = alt.altProfession2Data\n head = sum(1 if x['name'] == \"Grim-Veiled Hood\" else 0 for x in searchData)\n shoulder = sum(1 if x['name'] == \"Grim-Veiled Spaulders\" else 0 for x in searchData)\n back = sum(1 if x['name'] == \"Grim-Veiled Cape\" else 0 for x in searchData)\n chest = sum(1 if x['name'] == \"Grim-Veiled Robe\" else 0 for x in searchData)\n wrist = sum(1 if x['name'] == \"Grim-Veiled Bracers\" else 0 for x in searchData)\n hand = sum(1 if x['name'] == \"Grim-Veiled Mittens\" else 0 for x in searchData)\n waist = sum(1 if x['name'] == \"Grim-Veiled Belt\" else 0 for x in searchData)\n legs = sum(1 if x['name'] == \"Grim-Veiled Pants\" else 0 for x in searchData)\n feet = sum(1 if x['name'] == \"Grim-Veiled Sandals\" else 0 for x in searchData)\n if head > 0:\n legCloth.append([alt.altName,alt.altClass,head,shoulder,back,chest,wrist,hand,waist,legs,feet])\n\n # LEATHER AND MAIL LEGGO\n if ((alt.altProfession1 == \"Leatherworking\") and (alt.altProfession1Data != \"Missing\")) or ((alt.altProfession2 == \"Leatherworking\") and (alt.altProfession2Data != \"Missing\")):\n if alt.altProfession1 == \"Leatherworking\":\n searchData = alt.altProfession1Data\n elif alt.altProfession2 == \"Leatherworking\":\n searchData = alt.altProfession2Data\n head = sum(1 if x['name'] == \"Umbrahide Helm\" else 0 for x in searchData)\n shoulder = sum(1 if x['name'] == \"Umbrahide Pauldrons\" else 0 for x in searchData)\n chest = sum(1 if x['name'] == \"Umbrahide Vest\" else 0 for x in searchData)\n wrist = sum(1 if x['name'] == \"Umbrahide Armguards\" else 0 for x in searchData)\n hand = sum(1 if x['name'] == \"Umbrahide Gauntlets\" else 0 for x in searchData)\n waist = sum(1 if x['name'] == \"Umbrahide Waistguard\" else 0 for x in searchData)\n legs = sum(1 if x['name'] == \"Umbrahide Leggings\" else 0 for x in searchData)\n feet = sum(1 if x['name'] == \"Umbrahide Treads\" else 0 for x in searchData)\n if head > 0:\n legLeather.append([alt.altName,alt.altClass,head,shoulder,chest,wrist,hand,waist,legs,feet])\n\n head = sum(1 if x['name'] == \"Boneshatter Helm\" else 0 for x in searchData)\n shoulder = sum(1 if x['name'] == \"Boneshatter Pauldrons\" else 0 for x in searchData)\n chest = sum(1 if x['name'] == \"Boneshatter Vest\" else 0 for x in searchData)\n wrist = sum(1 if x['name'] == \"Boneshatter Armguards\" else 0 for x in searchData)\n hand = sum(1 if x['name'] == \"Boneshatter Gauntlets\" else 0 for x in searchData)\n waist = sum(1 if x['name'] == \"Boneshatter Waistguard\" else 0 for x in searchData)\n legs = sum(1 if x['name'] == \"Boneshatter Greaves\" else 0 for x in searchData)\n feet = sum(1 if x['name'] == \"Boneshatter Treads\" else 0 for x in searchData)\n if head > 0:\n 
legMail.append([alt.altName,alt.altClass,head,shoulder,chest,wrist,hand,waist,legs,feet])\n\n # PLATE LEGGO\n if ((alt.altProfession1 == \"Blacksmithing\") and (alt.altProfession1Data != \"Missing\")) or ((alt.altProfession2 == \"Blacksmithing\") and (alt.altProfession2Data != \"Missing\")):\n if alt.altProfession1 == \"Blacksmithing\":\n searchData = alt.altProfession1Data\n elif alt.altProfession2 == \"Blacksmithing\":\n searchData = alt.altProfession2Data\n head = sum(1 if x['name'] == \"Shadowghast Helm\" else 0 for x in searchData)\n shoulder = sum(1 if x['name'] == \"Shadowghast Pauldrons\" else 0 for x in searchData)\n chest = sum(1 if x['name'] == \"Shadowghast Breastplate\" else 0 for x in searchData)\n wrist = sum(1 if x['name'] == \"Shadowghast Armguards\" else 0 for x in searchData)\n hand = sum(1 if x['name'] == \"Shadowghast Gauntlets\" else 0 for x in searchData)\n waist = sum(1 if x['name'] == \"Shadowghast Waistguard\" else 0 for x in searchData)\n legs = sum(1 if x['name'] == \"Shadowghast Greaves\" else 0 for x in searchData)\n feet = sum(1 if x['name'] == \"Shadowghast Sabatons\" else 0 for x in searchData)\n if head > 0:\n legPlate.append([alt.altName,alt.altClass,head,shoulder,chest,wrist,hand,waist,legs,feet])\n\n # JEWEL LEGGO\n if ((alt.altProfession1 == \"Jewelcrafting\") and (alt.altProfession1Data != \"Missing\")) or ((alt.altProfession2 == \"Jewelcrafting\") and (alt.altProfession2Data != \"Missing\")):\n if alt.altProfession1 == \"Jewelcrafting\":\n searchData = alt.altProfession1Data\n elif alt.altProfession2 == \"Jewelcrafting\":\n searchData = alt.altProfession2Data\n neck = sum(1 if x['name'] == \"Shadowghast Necklace\" else 0 for x in searchData)\n ring = sum(1 if x['name'] == \"Shadowghast Ring\" else 0 for x in searchData)\n if neck > 0:\n legJewel.append([alt.altName,alt.altClass,neck,ring])\n\n# if any category has no data add one row containing '-' characters \n# **allows a dataframe to be created using pandas, avoids errors later on**\n\nif not legCloth:\n legCloth.append(np.full(11,'-'))\nif not legLeather:\n legLeather.append(np.full(10,'-'))\nif not legMail:\n legMail.append(np.full(10,'-'))\nif not legPlate:\n legPlate.append(np.full(10,'-'))\nif not legJewel:\n legJewel.append(np.full(4,'-'))\n\n# create a pandas dataframe for each legendary category \nclothFrame = pd.DataFrame(data=legCloth,columns=armourCape)\nleatherFrame = pd.DataFrame(data=legLeather,columns=armour)\nmailFrame = pd.DataFrame(data=legMail,columns=armour)\nplateFrame = pd.DataFrame(data=legPlate,columns=armour)\njewelFrame = pd.DataFrame(data=legJewel,columns=misc)\n\n#use spread to upload the dataframes to the corresponding worksheets\nprint(\"Uploading 1/5\")\nspread.df_to_sheet(clothFrame, index=False, sheet='Cloth', replace=True)\nprint(\"Uploading 2/5\")\nspread.df_to_sheet(leatherFrame, index=False, sheet='Leather', replace=True)\nprint(\"Uploading 3/5\")\nspread.df_to_sheet(mailFrame, index=False, sheet='Mail', replace=True)\nprint(\"Uploading 4/5\")\nspread.df_to_sheet(plateFrame, index=False, sheet='Plate', replace=True)\nprint(\"Uploading 5/5\")\nspread.df_to_sheet(jewelFrame, index=False, sheet='Jewel', replace=True)\n\n#cellFormat object for the heading of the table in each worksheet\nhead = cellFormat(\n backgroundColor=color.fromHex('#999999'),\n textFormat=textFormat(bold=True, foregroundColor=color.fromHex('#000000')),\n horizontalAlignment='CENTER',\n borders=borders(border('solid'),border('solid'),border('solid'),border('solid'))\n )\n\n'''\ncellFormat 
object for the rest of the table in each worksheet\n'''\nbody = cellFormat(\n backgroundColor=color.fromHex('#ffffff'),\n textFormat=textFormat(bold=False,foregroundColor=color.fromHex('#000000')),\n horizontalAlignment='CENTER',\n borders=borders(border('solid'),border('solid'),border('solid'),border('solid'))\n )\n\nconditions = [\n ['rank','NUMBER_EQ','0','#ea9999'], # not learned\n ['rank','NUMBER_EQ','1','#f9cb9c'], # rank 1\n ['rank','NUMBER_EQ','2','#ffe599'], # rank 2\n ['rank','NUMBER_EQ','3','#b6d7a8'], # rank 3\n ['rank','NUMBER_EQ','4','#a2c4c9'], # rank 4\n ['name','CUSTOM_FORMULA','=$B2=\"Death Knight\"','#c41e3a'], # death knight\n ['name','CUSTOM_FORMULA','=$B2=\"Demon Hunter\"','#a330c9'], # demon hunter\n ['name','CUSTOM_FORMULA','=$B2=\"Druid\"','#ff7c0a'], # druid\n ['name','CUSTOM_FORMULA','=$B2=\"Hunter\"','#aad372'], # hunter\n ['name','CUSTOM_FORMULA','=$B2=\"Mage\"','#3fc7eb'], # mage\n ['name','CUSTOM_FORMULA','=$B2=\"Monk\"','#00ff98'], # monk\n ['name','CUSTOM_FORMULA','=$B2=\"Paladin\"','#f48cba'], # paladin\n ['name','CUSTOM_FORMULA','=$B2=\"Priest\"','#f3f3f3'], # priest\n ['name','CUSTOM_FORMULA','=$B2=\"Rogue\"','#fff468'], # rogue\n ['name','CUSTOM_FORMULA','=$B2=\"Shaman\"','#0070dd'], # shaman\n ['name','CUSTOM_FORMULA','=$B2=\"Warlock\"','#8788ee'], # warlock\n ['name','CUSTOM_FORMULA','=$B2=\"Warrior\"','#c69b6d'] # warrior\n\n]\n\n# create a list of conditional format rule objects and return the list\ndef createRules(ranks,names):\n rules = []\n for rule in conditions:\n if rule[0] == 'rank':\n tempRange = ranks\n elif rule[0] == 'name':\n tempRange = names\n newRule = ConditionalFormatRule(\n ranges=[tempRange],\n booleanRule=BooleanRule(\n condition=BooleanCondition(rule[1],[rule[2]]),\n format=CellFormat(backgroundColor=color.fromHex(rule[3]))\n )\n )\n rules.append(newRule)\n return rules\n\n# format worksheets and add conditional format rules\nfor leggo in ['Cloth','Leather','Mail','Plate','Jewel']:\n worksheet = spread.find_sheet(leggo)\n format_cell_ranges(worksheet, [('1', head), ('2:100', body)])\n set_column_widths(worksheet, [ ('A:B', 100), ('C:K', 65) ])\n sheetRules = get_conditional_format_rules(worksheet)\n if len(sheetRules) == 0:\n ranks = GridRange.from_a1_range('C2:K100', worksheet)\n names = GridRange.from_a1_range('A2:B100', worksheet)\n rules = createRules(ranks,names)\n sheetRules.clear()\n for rule in rules:\n sheetRules.append(rule)\n sheetRules.save()\n print(\"Conditional formatting has been added\")\n else:\n print(\"Conditional formatting has already been applied\")\n\nend = time.perf_counter()\nfinalTime = end - start\nprint(\"Spreadhseet has been updated!\")\nprint(\"Total time to complete was \" + str(finalTime) + \" seconds!\")","repo_name":"joefarrelly/LegTracker","sub_path":"core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":13377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30955549891","text":"\r\nfrom . 
import ResourceBase\r\n\r\n\r\nclass Resource(ResourceBase):\r\n url = ''\r\n epgurl = 'http://new.s-tv.ru/tv/'\r\n\r\n def _get_epg(self):\r\n for d in self.date_ranges(1):\r\n soup = self.get_soup(\r\n '%s-/%s/' % (self.epgurl, d.strftime(\"%Y-%m-%d\")),\r\n cookies = self.cookie\r\n )\r\n for c in soup.findAll('table', class_='item_table'):\r\n name = c.find('td', class_='channel').find('img').get('alt')\r\n self.logger.debug('#########: %r', name)\r\n yield {\r\n 'name': name,\r\n 'items': self._get_epg_items(c.findAll('div', class_='prg_item'), d)\r\n }\r\n\r\n def _get_epg_items(self, item, date):\r\n priv = None\r\n for i in item:\r\n\r\n h, m = i.find('span', class_='prg_item_time').string.split('.')\r\n if priv is not None and int(h) < priv:\r\n date = date + self.datetime.timedelta(days=1)\r\n priv = int(h)\r\n\r\n date_start = date.replace(hour=int(h), minute=int(m), second=0)\r\n title = category = description = arts = ''\r\n\r\n prg_item = i.find('a')\r\n if not prg_item:\r\n prg_item = i.find('span', class_='prg_item_no')\r\n title = prg_item.getText()\r\n else:\r\n title = prg_item.getText()\r\n href = prg_item.get('href')\r\n ab, an, pab, pan = (0,0,0,0)\r\n m = self.re.search('^#(ab|an|pab|pan)(\\d+)$', href)\r\n if m and m.group(1) == 'ab': ab = m.group(2)\r\n if m and m.group(1) == 'an': an = m.group(2)\r\n if m and m.group(1) == 'pab': pab = m.group(2)\r\n if m and m.group(1) == 'pan': pan = m.group(2)\r\n if ab or an:\r\n try:\r\n info_soup = self.get_soup('%sajaxinfo/%s/%s/%s/%s' % \\\r\n (self.epgurl, ab, an, pab, pan))\r\n desc_soup = info_soup.find('div', class_='ajax-info-desc').find('p')\r\n if desc_soup:\r\n description = desc_soup.getText('\\n')\r\n h3_soup = info_soup.find('h3')\r\n if h3_soup:\r\n title = h3_soup.string\r\n h4_soup = info_soup.find('h4')\r\n if h4_soup:\r\n description = '%s\\n%s' % (h4_soup.string, description)\r\n arts_soup = info_soup.findAll('img', src=self.re.compile('^http'))\r\n if arts_soup:\r\n arts = [i.get('src') for i in arts_soup]\r\n details_soup = info_soup.find('div', class_='ajax-info-people')\r\n if details_soup:\r\n cat_soup = details_soup.find('p', class_='type')\r\n if cat_soup: category = cat_soup.string\r\n r = self.re.search(r'Жанр:\\n(.*?)\\n', details_soup.getText('\\n'))\r\n if category and r:\r\n category = '%s/%s' % (cat, r.group(1))\r\n elif r:\r\n category = r.group(1)\r\n except Exception as e:\r\n pass\r\n\r\n self.logger.debug('### title: %r', title)\r\n self.logger.debug('### categ: %r', category)\r\n self.logger.debug('### descr: %r', description)\r\n self.logger.debug('### start: %s', date_start)\r\n self.logger.debug('### arts: %s', arts)\r\n yield {\r\n 'title': title,\r\n 'date_start': date_start,\r\n 'category': category,\r\n 'description': description,\r\n 'arts': arts\r\n }\r\n","repo_name":"rleschuk/tvservice-server","sub_path":"app/coretv/mod_stv.py","file_name":"mod_stv.py","file_ext":"py","file_size_in_byte":3922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31295969230","text":"import os, sys\nsys.path.append(os.path.dirname(os.path.abspath(__file__)))\nfrom decision_module import DecisionModule\nfrom gv import dbg, kg\nfrom action import StandaloneAction\nfrom event import NewTransitionEvent\n\nclass Restart(DecisionModule):\n \"\"\"\n The Restart module listens for a game over and will restart the game.\n\n \"\"\"\n def __init__(self, active=False):\n super().__init__()\n self._active = active\n\n\n def process_event(self, 
event):\n \"\"\" Process an event from the event stream. \"\"\"\n if not self._active:\n return\n if type(event) is NewTransitionEvent:\n if 'RESTART' in event.new_obs and \\\n 'RESTORE' in event.new_obs and \\\n 'QUIT' in event.new_obs:\n self._eagerness = 1.\n if 'You have died' in event.new_obs:\n self._eagerness = 1.\n\n\n def take_control(self):\n \"\"\" Performs the previously extracted action \"\"\"\n obs = yield\n dbg(\"[RESTART] Restarting Game\")\n action = StandaloneAction(\"IEEECIG-ADVENT-RESTART-COMMAND\")\n response = yield action\n kg.reset()\n self._eagerness = 0.\n","repo_name":"microsoft/nail_agent","sub_path":"agent/decision_modules/restart.py","file_name":"restart.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"37"} +{"seq_id":"28901855775","text":"'''\nCreated on 12/05/2014\n\n@author: Aluno\n'''\nimport os\nimport Box2D as b2\nimport pygame\nfrom pygame.locals import QUIT\nfrom math import pi\nfrom pygame.constants import KEYDOWN, K_SPACE, KEYUP, MOUSEBUTTONDOWN, \\\n MOUSEBUTTONUP, MOUSEMOTION\nimport math\n\nFPS = 30.0\nPPM = 16.0\n\npygame.init()\nscreen = pygame.display.set_mode((800, 600), 0, 32)\n\ngatoImg = pygame.image.load(os.path.join('', 'Gato.png')).convert_alpha()\nmuroImg = pygame.image.load(os.path.join('', 'muro.jpg')).convert_alpha()\npisoImg = pygame.image.load(os.path.join('', 'ground.png')).convert_alpha()\nestilingImg = pygame.image.load(os.path.join('', 'Estilingue.png')).convert_alpha()\n\ngato = {\"pos\": [5.0, 6.0], \"angle\": 0.0, \"type\": b2.b2_dynamicBody, \"shape\": b2.b2PolygonShape(box=[4.0, 2.0]),\n \"density\": 15.0, \"restitution\": 0.5, \"friction\": 0.5, \"image\": gatoImg, \"scale\": [10.0, 5.0]}\n\nparede1 = {\"pos\": [40.0, 3.5], \"angle\": 0.0, \"type\": b2.b2_dynamicBody, \"shape\": b2.b2PolygonShape(box=[1.0, 3.0]),\n \"density\": 15.0, \"restitution\": 0.5, \"friction\": 0.5, \"image\": muroImg, \"scale\": [10.0, 5.0]}\nbloco1 = {\"pos\": [40.0, 9.0], \"angle\": 0.0, \"type\": b2.b2_dynamicBody, \"shape\": b2.b2PolygonShape(box=[2.0, 2.0]),\n \"density\": 15.0, \"restitution\": 0.5, \"friction\": 0.5, \"image\": muroImg, \"scale\": [10.0, 5.0]}\n\npiso = {\"pos\": [25.0, 0.5], \"angle\": 0.0, \"type\": b2.b2_staticBody, \"shape\": b2.b2PolygonShape(box=[25.0, 0.5]),\n \"density\": 100.0, \"restitution\": 0.2, \"friction\": 0.3, \"image\": pisoImg, \"scale\": [10.0, 5.0]}\n\n\ndef get_position_from_center(center_pos, size):\n (center_x, center_y) = center_pos\n (comprimento, altura) = size\n pos_x = (center_x) - (comprimento / 2)\n pos_y = (center_y) - (altura / 2)\n return (int(pos_x), int(pos_y))\n\n\ndef geraBody(world, obj):\n bodyDef = b2.b2BodyDef()\n bodyDef.position = obj[\"pos\"]\n bodyDef.type = obj[\"type\"]\n bodyDef.angle = obj[\"angle\"]\n\n body = world.CreateBody(bodyDef)\n\n fixDef = b2.b2FixtureDef()\n fixDef.shape = obj[\"shape\"]\n fixDef.restitution = obj[\"restitution\"]\n fixDef.friction = obj[\"friction\"]\n fixDef.density = obj[\"density\"]\n body.CreateFixture(fixDef)\n\n body.userData = obj\n\n return body\n\n\ng = b2.b2Vec2(0.0, -9.8)\nmundo = b2.b2World(g, True)\ngatoBody = geraBody(mundo, gato)\nparede1Body = geraBody(mundo, parede1)\nbloco1Body = geraBody(mundo, bloco1)\npisoBody = geraBody(mundo, piso)\n\n\ndef distancia(ponto1, ponto2):\n dist = math.sqrt((ponto1[0] - ponto2[0]) ** 2 + (ponto1[1] - ponto2[1]) ** 2)\n return dist\n\n\ndef drawPolygon(scr, obj, ppm):\n pontos = []\n shape = 
obj.fixtures[0].shape\n for v in shape.vertices:\n v = (obj.transform * v)\n vertice = [v[0] * ppm, scr.get_height() - (v[1] * ppm)]\n pontos.append(vertice)\n\n # print pontos\n # print pontos\n pygame.draw.polygon(scr, (255, 0, 0), pontos, 2)\n\n\ndef draw(scr, obj, ppm):\n drawPolygon(scr, obj, ppm)\n if 'image' in obj.userData:\n image = obj.userData[\"image\"]\n image_scale = obj.userData[\"scale\"]\n image_transformed = pygame.transform.scale(image,\n (int(image_scale[0] * ppm), int(image_scale[1] * ppm)))\n image_transformed = pygame.transform.rotate(image_transformed, (obj.transform.angle * 180 / pi))\n # if ( int(body.transform.angle) != 0):\n # imagem = pygame.transform.rotate(imagem, (body.transform.angle * math.pi / 180)).copy()\n pos = obj.position.copy()\n pos[0] = (pos[0]) * ppm\n pos[1] = scr.get_height() - (pos[1] * ppm)\n # pos = (int(pos[0] * PPM), int(pos[1] * PPM))\n size = (image_transformed.get_width(), image_transformed.get_height())\n screen.blit(image_transformed, get_position_from_center(pos, size))\n\n\nclk = pygame.time.Clock()\nincForca = 0\nforca = 1\ndisparo = False\ninitialPos = (200, 400)\nmousePos = initialPos\n\nwhile (True):\n screen.fill((0, 0, 0))\n draw(screen, gatoBody, PPM)\n draw(screen, pisoBody, PPM)\n draw(screen, parede1Body, PPM)\n draw(screen, bloco1Body, PPM)\n pygame.display.update()\n clk.tick(30)\n if (disparo == False):\n gatoBody.position = [mousePos[0] / PPM, (screen.get_height() - mousePos[1]) / PPM]\n gatoBody.linearVelocity = (0, 0)\n gatoBody.angularVelocity = 0\n gatoBody.awake = True\n\n mundo.Step(1.0 / FPS, 8, 3)\n\n for e in pygame.event.get():\n if (e.type == QUIT):\n exit()\n elif (e.type == MOUSEBUTTONDOWN):\n if (e.button == 1):\n mousePos = e.pos\n\n elif (e.type == MOUSEMOTION):\n if (e.buttons[0] == 1):\n mousePos = e.pos\n forca = distancia(initialPos, mousePos)\n\n\n\n elif (e.type == MOUSEBUTTONUP):\n if (e.button == 1):\n print\n forca\n gatoBody.ApplyForce((gatoBody.fixtures[0].density * forca * 1000, 0), gatoBody.position, True)\n disparo = True","repo_name":"antoniorcn/pypatrol","sub_path":"estilinguecat.py","file_name":"estilinguecat.py","file_ext":"py","file_size_in_byte":5020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24118258439","text":"import NeuralNetwork\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n\n\"\"\" Reading saved Data \"\"\"\nX_train = np.load('x_egitim.npy')\ny_desired = np.load('yd_egitim.npy')\n\nX_test = np.load('x_test.npy')\ny_test_desired = np.load('yd_test.npy')\n\nallLosses = []\n\nfor i in range(10):\n Network = NeuralNetwork.NeuralNetwork([50, 12, 4])\n epoch, loss, test_loss, test_accuracies, train_accuracies = Network.train(x_train=X_train, y_train=y_desired, x_test=X_test,\n y_test=y_test_desired, epochs=100, learning_rate=i*0.1, alfa=0.6, tqdm_=True, stop_error=0.00001)\n allLosses.append(loss)\n\n\nfor i in range(len(allLosses)):\n plt.plot(allLosses[i])\n plt.xlabel(\"Her veri için hata, Learning rate = \" + str(i*0.1))\n plt.ylabel(\"Toplam Karesel Ortalama Hata\")\n plt.show()\n","repo_name":"rumeysayilma/ann-hw","sub_path":"first-question_different_learning_rates.py","file_name":"first-question_different_learning_rates.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4791544645","text":"# -*- coding: utf-8 -*-\n\nfrom takamaru import Reddit, Hawk\nfrom constants import SUBREDDITS, 
REDDIT_RECIPIENTS\n\n\ndef main():\n r_instance = Reddit()\n posts_hot = r_instance.search(hot=True)\n hawk = Hawk()\n\n for i, ph in enumerate(posts_hot):\n subject = \"Popular Reddit Posts in /r/{sub}\".format(sub=SUBREDDITS[i])\n for r in REDDIT_RECIPIENTS:\n hawk.gmail_hawk(subject=subject, source='reddit',\n body=ph, recipients=[r])\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"vipul-sharma20/takamaru","sub_path":"takamaru/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"30804858687","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nZetCode PyQt4 tutorial \n\nThis example shows three labels on a window\nusing absolute positioning. \n\nauthor: Jan Bodnar\nwebsite: zetcode.com \nlast edited: October 2011\n\"\"\"\n\nimport sys\nfrom PyQt4 import QtGui\n\nclass Example(QtGui.QWidget):\n \n def __init__(self):\n super(Example, self).__init__()\n \n self.initUI()\n \n def initUI(self):\n \n \n lbl1 = QtGui.QLabel('Welcome to GIT GUI', self)\n lbl1.move(15, 10)\n\n lbl2 = QtGui.QLabel('Welcome to GIT', self)\n lbl2.move(100,300 )\n\n okButton = QtGui.QPushButton(\"Next\")\n cancelButton = QtGui.QPushButton(\"Quit\")\n\n hbox = QtGui.QHBoxLayout()\n hbox.addStretch(1)\n hbox.addWidget(okButton)\n hbox.addWidget(cancelButton)\n\n vbox = QtGui.QVBoxLayout()\n vbox.addStretch(1)\n vbox.addLayout(hbox)\n \n self.setLayout(vbox) \n \n self.setGeometry(100, 100, 1000, 1000)\n self.setWindowTitle('GIT GUI') \n self.show()\n \ndef main():\n \n app = QtGui.QApplication(sys.argv)\n ex = Example()\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"ukrishnank83/gitgui","sub_path":"main1.py","file_name":"main1.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15029512487","text":"\"\"\"\nThis script takes a csv, process the data\nand then insert that data into dataconjuntos table\n\"\"\"\nimport re\nimport os\nimport pandas as pd\nimport numpy as np\nimport datetime\nfrom decouple import config\nfrom connection import get_engine, get_session\nfrom sqlalchemy.sql import text\nfrom sqlalchemy import exc\n\nfrom loggings import set_up_loggin\n\n# Logger\nlogger = set_up_loggin(config(\"FILE_LOGGER_NAME\"))\n\n# engine\nengine = get_engine(\n config(\"USER\"), \n config(\"PASSWORD\"), \n config(\"HOST\"), \n config(\"DB_NAME\")\n )\n\n# session\nsession = get_session()\n\n\ndef find_files():\n \"\"\"\n This function finds CSV files of interest\n \n @return: a list with the lasted CSV files downloaded\n \"\"\"\n logger.info(\"Search for the most recent files of interest started\")\n \n # Defining path\n folders = [\"museos\", \"cines\", \"bibliotecas\"] # Main folder\n subfolder = [] # subfolder\n files = [] # CSV files\n \n try: \n subfolder.append(os.listdir(\"museos\")[-1])\n subfolder.append(os.listdir(\"cines\")[-1])\n subfolder.append(os.listdir(\"bibliotecas\")[-1])\n \n \n for i in range(3):\n path = folders[i]\n path = os.path.join(path, subfolder[i])\n files.append(os.listdir(path)[-1])\n except Exception as ex:\n logger.error(f\"{ex} im tabla_datos_conjuntos,py\") \n \n complete_path = []\n for i in range(3):\n complete_path.append(os.path.join(folders[i], subfolder[i], files[i]))\n\n logger.info(\"Path of the most recent files found\") \n \n return complete_path \n \n \ndef get_col_of_insterest(df: 
pd.DataFrame):\n \"\"\"\n This function takes a DataFrame object \n and normalize its column names, \n then returns a DataFrame object \n with solely the columns of interest\n \n @params : DataFrame object\n @return : DataFrame object with solely the columns of interest\n \"\"\"\n # Dictionaries. Both dictionaries have two elements per line\n stress_vowels = {\n \"á\": \"a\", \"é\": \"e\",\n \"í\": \"i\", \"ó\": \"o\",\n \"ú\": \"u\"\n } \n \n col_names = {\n \"cod_loc\": \"cod_localidad\", \"idprovincia\": \"id_provincia\",\n \"iddepartamento\": \"id_departamento\", \"categoria\": \"categoria\",\n \"provincia\": \"provincia\", \"localidad\": \"localidad\",\n \"nombre\": \"nombre\", \"domicilio\": \"domicilio\",\n \"direccion\": \"domicilio\", \"cp\": \"codigo_postal\",\n \"telefono\": \"numero_de_telefono\", \"mail\": \"mail\", \n \"web\": \"web\"\n }\n \n # Column names to lower case\n df = df.rename(str.lower, axis = 1) \n \n # Replacing stress vowels to normal vowels\n for key, value in stress_vowels.items():\n df = df.rename(\n lambda x: re.sub(key, value, x), axis = 1\n ) \n \n df = df.rename(col_names, axis = 1)\n \n # Settings columns to filter. \n cols = list(col_names.values())\n cols.remove(\"domicilio\") # As domicilio is twice, we are deleting one of them.\n \n return df[cols] \n\n\ndef clean_columns(df: pd.DataFrame): \n \"\"\"\n This function takes a dataframe object\n and apply some methods in orden to standarize data.\n (string to lower case, no stress vowels, managment of missing values)\n \n @params : dataframe object\n \n @return : dataframe object with processed data.\n \"\"\"\n stress_vowels = {\n \"á\": \"a\", \"é\": \"e\",\n \"í\": \"i\", \"ó\": \"o\",\n \"ú\": \"u\"\n } \n \n # Columns with strings\n cols = [\n \"categoria\", \"provincia\", \n \"localidad\", \"nombre\", \n \"domicilio\", \"mail\", \n \"web\"\n ]\n \n # Replacing stress vowels\n df[cols] = df[cols].apply(lambda x: x.str.lower())\n for key, value in stress_vowels.items():\n for col in cols:\n df[col] = df[col].apply(lambda x: re.sub(key, value, str(x)))\n \n # Treating missing values\n cols_missing = df[(df.isnull().sum() > 0).index].columns\n \"\"\"\n Giving a look at the columns i realized \n that there are some missing values (nan) present there; \n however, there are some words that also represent\n missing values (sin direccion, s/n), so, let's unify them. 
\n \"\"\"\n df[cols_missing] = df[cols_missing].replace(\"sin direccion\", np.nan)\n df[cols_missing] = df[cols_missing].replace(\"s/d\", np.nan)\n df[cols_missing] = df[cols_missing].replace(\"s/n\", np.nan)\n df[cols_missing] = df[cols_missing].replace(\"nan\", np.nan)\n \n # deleting spaces within phone numbers\n df[\"numero_de_telefono\"] = df[\"numero_de_telefono\"].apply(\n lambda x: re.sub(\" \", \"\", str(x))\n )\n \n # Changing dtype of columns\n # cols_type = df.select_dtypes(\"object\").columns\n # df[cols_type] = df[cols_type].astype(\"string\")\n \n df[[\"categoria\", \"provincia\", \"localidad\"]] = df[[\n \"categoria\", \n \"provincia\", \n \"localidad\"\n ]].astype(\"category\")\n \n df[[\"cod_localidad\", \"id_provincia\", \"id_departamento\"]] = df[[\n \"cod_localidad\", \"id_provincia\", \"id_departamento\"\n ]].astype(\"string\")\n \n df[\"fecha_carga\"] = datetime.datetime.now()\n \n return df\n \n \ndef build_final_df(): \n \"\"\"\n This function builds the final df\n \n @return: DataFrame object\n \"\"\"\n \n complete_path = find_files()\n \n dfs = [] # List of dataframes object with the columns of interest\n for i in complete_path:\n # It create a filtered dataframe \n try:\n dfs.append(get_col_of_insterest(pd.read_csv(i))) \n except Exception as ex:\n logger.error(f\"{ex} in tabla_datos_conjuntos.py\")\n # concatening dataframes \n final_df = pd.concat([dfs[0], dfs[1]], ignore_index=True)\n final_df = pd.concat([final_df, dfs[2]], ignore_index=True) \n \n final_df = clean_columns(final_df)\n \n return final_df\n \n \ndef insert_datosconjuntos():\n \"\"\"\n This function insert information from\n pandas dataframe to sql table\n \"\"\"\n \n datos_conjuntos = build_final_df()\n \n datos_conjuntos.to_sql(\n \"datosconjuntos\",\n con = engine, \n if_exists=\"replace\", \n index = False\n )\n \n \ndef quantity_records_category():\n \"\"\"\n This function execute a query to \n show the quantity of records are per\n category\n \"\"\"\n \n with engine.connect() as con:\n try:\n with open(\"sql_scripts/query_cantidadxcategoria.sql\") as file:\n query = text(file.read())\n result = con.execute(query)\n except exc.SQLAlchemyError as alche_error:\n logger.error(f\"{alche_error} in tabla_datos_comjuntos\") \n \n pd.DataFrame(result).to_sql(\n \"cantidadxcategorias\",\n con = engine,\n if_exists =\"replace\",\n index = False\n ) \n \n \ndef quantity_records_province_category():\n \"\"\"\n This function execute a query to \n show the quantity of records are per\n province and category\n \"\"\"\n \"sql_scripts/cantidadxprovinciaycategorias.sql\"\n \n with engine.connect() as con:\n try:\n with open( \"sql_scripts/query_cantidadxprovinciaycategorias.sql\") as file:\n query = text(file.read())\n result = con.execute(query) \n except exc.SQLAlchemyError as alche_error:\n logger.error(f\"{alche_error} in tabla_datos_comjuntos\") \n \n pd.DataFrame(result).to_sql(\n \"cantidadxprovinciaycategorias\",\n con = engine,\n if_exists =\"replace\",\n index = False\n )\n \n\ndef main_tabla_datos_conjuntos():\n \"\"\"\n This function executes the complete file.\n \"\"\" \n logger.info(\"The execution of tabla_datos_conjuntos.py started\")\n \n insert_datosconjuntos()\n \n logger.info(\"The execution of tabla_datos_conjuntos.py finished\")\n \n \nif __name__ == '__main__':\n main_tabla_datos_conjuntos()\n 
","repo_name":"gioleon/challenge-alkemy","sub_path":"tabla_datos_conjuntos.py","file_name":"tabla_datos_conjuntos.py","file_ext":"py","file_size_in_byte":7899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23823259585","text":"from flask import Flask, render_template, request, jsonify\nimport os\nfrom flask_cors import CORS, cross_origin\n\nfrom preprocessing import DecodeFileToBase64\nimport SpeechToText\n\nos.putenv('LANG', 'en_US.UTF-8')\nos.putenv('LC_ALL', 'en_US.UTF-8')\n\napp = Flask(__name__)\nCORS(app)\n\n\n@app.route(\"/\", methods = ['GET'])\n@cross_origin()\ndef home():\n return render_template('index.html')\n\n\n@app.route(\"/predict\", methods = ['POST'])\n@cross_origin()\ndef predictRoute():\n audioFile = request.json['sound']\n DecodeFileToBase64(audioFile, \"AudioFile.wav\")\n result = SpeechToText.SpeechToTextConversion(\"AudioFile.wav\")\n return jsonify({\"Result\" : str(result)})\n\n\n\n\nif __name__ == '__main__':\n #app.run(host='0.0.0.0', port=5000, debug=True)\n app.run(debug=True)\n","repo_name":"RajeshKGangwar/SpeechToTextConversion","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32625574425","text":"# Solves the branch sum problem\n\n# The recursive solution is quite obvious\n# Trying to think of other potential approaches, possibly iterative instead.\n# Iterative solutions only work for DFS-based approaches for binary tree objects\n# I can't use DFS here to construct a solution,\n# so it seems that I am confined to the recursive solution.\n\n# If the input was given in the form of an adjacency list, I could have an iterative solution.\n# However, this is not the case.\n\n\nclass BinaryTree:\n def __init__(self, val):\n self.value = val\n self.left = None\n self.right = None\n \n def print_tree(self):\n if self.left:\n print(f\"{self.value}: Left {self.left.value}.\")\n self.left.print_tree()\n\n if self.right:\n print(f\"{self.value}: Right {self.right.value}.\")\n self.right.print_tree()\n\n\n# O(n) time | O(n) space\ndef branch_sums(node):\n result = []\n \n if node:\n left_res = [node.value + val for val in branch_sums(node.left)]\n right_res = [node.value + val for val in branch_sums(node.right)]\n\n result.extend(left_res)\n result.extend(right_res)\n \n if not node.left and not node.right:\n result.append(node.value)\n \n return result\n \n\nif __name__ == \"__main__\":\n bt = BinaryTree(6)\n bt_list = []\n\n for element in [3, 5, 2, 5, 4, 7, 4]:\n bt_list.append(BinaryTree(element))\n \n bt.left = bt_list[0]\n bt.right = bt_list[1]\n\n bt_list[0].left = bt_list[2]\n bt_list[0].right = bt_list[3]\n\n bt_list[1].right = bt_list[4]\n\n bt_list[3].left = bt_list[5]\n bt_list[3].right = bt_list[6]\n\n # bt.print_tree()\n\n result = branch_sums(bt)\n\n assert set(result) == set([11, 21, 18, 15]), \"Not quite there yet.\"\n\n print(\"You're all set!\")","repo_name":"tobeyOguney/Zoo-of-Algorithms","sub_path":"Branch Sum/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"4744641647","text":"from utils import *\r\nfrom torch.optim import Adam\r\nfrom tqdm import tqdm\r\nimport time\r\nfrom torch import tensor\r\nimport torch.nn.functional as F\r\n\r\ndevice = torch.device('cuda' if torch.cuda.is_available() else 
'cpu')\r\n\r\ndef train_two_layer_model(model, optimizer, data, adj_label_1, adj_label_2, alpha, tau):\r\n model.train()\r\n optimizer.zero_grad()\r\n output, x_1, x_2 = model(data.x)\r\n loss_train_class = F.nll_loss(output[data.train_mask], data.y[data.train_mask])\r\n loss_Ncontrast_1 = Ncontrast(x_1, adj_label_1, tau)\r\n loss_Ncontrast_2 = Ncontrast(x_2, adj_label_2, tau)\r\n loss_train = loss_train_class + alpha * (loss_Ncontrast_1 + loss_Ncontrast_2)\r\n loss_train.backward()\r\n optimizer.step()\r\n return\r\n\r\n\r\ndef train_three_layer_model(model, optimizer, data, adj_label_1, adj_label_2, adj_label_3, alpha, tau):\r\n model.train()\r\n optimizer.zero_grad()\r\n output, x_1, x_2, x_3 = model(data.x)\r\n loss_train_class = F.nll_loss(output[data.train_mask], data.y[data.train_mask])\r\n loss_Ncontrast_1 = Ncontrast(x_1, adj_label_1, tau)\r\n loss_Ncontrast_2 = Ncontrast(x_2, adj_label_2, tau)\r\n loss_Ncontrast_3 = Ncontrast(x_3, adj_label_3, tau)\r\n loss_train = loss_train_class + alpha * (loss_Ncontrast_1 + loss_Ncontrast_2 + loss_Ncontrast_3)\r\n loss_train.backward()\r\n optimizer.step()\r\n return\r\n\r\n\r\ndef train_four_layer_model(model, optimizer, data, adj_label_1, adj_label_2, adj_label_3, adj_label_4, alpha, tau):\r\n model.train()\r\n optimizer.zero_grad()\r\n output, x_1, x_2, x_3, x_4 = model(data.x)\r\n loss_train_class = F.nll_loss(output[data.train_mask], data.y[data.train_mask])\r\n loss_Ncontrast_1 = Ncontrast(x_1, adj_label_1, tau)\r\n loss_Ncontrast_2 = Ncontrast(x_2, adj_label_2, tau)\r\n loss_Ncontrast_3 = Ncontrast(x_3, adj_label_3, tau)\r\n loss_Ncontrast_4 = Ncontrast(x_4, adj_label_4, tau)\r\n loss_train = loss_train_class + alpha * (loss_Ncontrast_1 + loss_Ncontrast_2 + loss_Ncontrast_3 + loss_Ncontrast_4)\r\n loss_train.backward()\r\n optimizer.step()\r\n return\r\n\r\n\r\ndef evaluate(model, data):\r\n model.eval()\r\n\r\n with torch.no_grad():\r\n logits = model(data.x)\r\n\r\n outs = {}\r\n for key in ['train', 'val', 'test']:\r\n mask = data['{}_mask'.format(key)]\r\n loss = F.nll_loss(logits[mask], data.y[mask]).item()\r\n pred = logits[mask].max(1)[1]\r\n acc = pred.eq(data.y[mask]).sum().item() / mask.sum().item()\r\n\r\n outs['{}_loss'.format(key)] = loss\r\n outs['{}_acc'.format(key)] = acc\r\n\r\n return outs\r\n\r\n\r\ndef run(dataset, model, runs, epochs, lr, weight_decay, early_stopping, alpha, tau, k, permute_masks=None, lcc=False, adj_label_1=None, adj_label_2=None,\r\n adj_label_3=None, adj_label_4=None):\r\n val_losses, accs, durations = [], [], []\r\n\r\n lcc_mask = None\r\n if lcc: # select largest connected component\r\n data_ori = dataset[0]\r\n data_nx = to_networkx(data_ori)\r\n data_nx = data_nx.to_undirected()\r\n print(\"Original #nodes:\", data_nx.number_of_nodes())\r\n data_nx = data_nx.subgraph(max(nx.connected_components(data_nx), key=len))\r\n print(\"#Nodes after lcc:\", data_nx.number_of_nodes())\r\n lcc_mask = list(data_nx.nodes)\r\n\r\n data = dataset[0]\r\n\r\n pbar = tqdm(range(runs), unit='run')\r\n\r\n for _ in pbar:\r\n\r\n if permute_masks is not None:\r\n data = permute_masks(data, dataset.num_classes, lcc_mask)\r\n data = data.to(device)\r\n\r\n model.reset_parameters()\r\n optimizer = Adam(model.parameters(), lr=lr, weight_decay=weight_decay)\r\n\r\n if torch.cuda.is_available():\r\n torch.cuda.synchronize()\r\n\r\n t_start = time.perf_counter()\r\n\r\n best_val_loss = float('inf')\r\n test_acc = 0\r\n val_loss_history = []\r\n\r\n for epoch in range(1, epochs + 1):\r\n if k == 2:\r\n out = 
train_two_layer_model(model, optimizer, data, adj_label_1, adj_label_2, alpha, tau)\r\n elif k == 3:\r\n out = train_three_layer_model(model, optimizer, data, adj_label_1, adj_label_2, adj_label_3, alpha, tau)\r\n else:\r\n out = train_four_layer_model(model, optimizer, data, adj_label_1, adj_label_2, adj_label_3, adj_label_4,\r\n alpha, tau)\r\n eval_info = evaluate(model, data)\r\n eval_info['epoch'] = epoch\r\n\r\n if eval_info['val_loss'] < best_val_loss:\r\n best_val_loss = eval_info['val_loss']\r\n test_acc = eval_info['test_acc']\r\n\r\n val_loss_history.append(eval_info['val_loss'])\r\n if early_stopping > 0 and epoch > epochs // 2:\r\n tmp = tensor(val_loss_history[-(early_stopping + 1):-1])\r\n if eval_info['val_loss'] > tmp.mean().item():\r\n break\r\n\r\n if torch.cuda.is_available():\r\n torch.cuda.synchronize()\r\n\r\n t_end = time.perf_counter()\r\n\r\n val_losses.append(best_val_loss)\r\n accs.append(test_acc)\r\n durations.append(t_end - t_start)\r\n\r\n loss, acc, duration = tensor(val_losses), tensor(accs), tensor(durations)\r\n\r\n print('Val Loss: {:.4f}, Test Accuracy: {:.3f} ± {:.3f}, Duration: {:.3f}'.\r\n format(loss.mean().item(),\r\n acc.mean().item(),\r\n acc.std().item(),\r\n duration.mean().item()))\r\n","repo_name":"YJ199804/AM-NDC","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5424,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"8766320928","text":"\ndef say_hello(person):\n print(f'hello {person.title()}, how are you'.title() + '?')\n\ndef square(x):\n output = x*x\n return(output)\n\nif __name__ == \"__main__\":\n print('python program')\n say_hello('tim')\n mysquare = square(40)\n print(f'my square is {mysquare}')\nelse:\n print(\"script ran when imported\")\n\n","repo_name":"timlamec/if-name-is-main-imports-and-direct-runs-","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11879775190","text":"import shutil\nfrom Common import *\nfrom Params import *\nimport unittest\nimport warnings\nimport threading\n\nusage = ['LogTool - extracts Overcloud Errors and provides statistics',\n '1) Set needed configuration in Params.py configuration file.',\n '2) cd python3 -m unittest LogTool_Plugin.LogTool.test_1_Export_Overcloud_Errors',\n '3) python3 -m unittest LogTool_Plugin.LogTool',\n '4) Start specific test: \"python3 -m unittest LogTool_Plugin.LogTool.test_1_Export_Overcloud_Errors\" to start this script']\nif len(sys.argv)==1 or (sys.argv[1] in ['-h','--help']):\n spec_print(usage, 'yellow')\n sys.exit(1)\n\n\n\n# Parameters #\nerrors_on_execution = {}\ncompeted_nodes={}\nworkers_output={}\n\n\n### Check given user_start_time ###\nif check_time(user_start_time)!=True:\n print_in_color('FATAL ERROR - provided \"user_start_time\" value: \"'+user_start_time+'\" in Params.py is incorrect!!!')\n sys.exit(1)\n\n### Get all nodes ###\nnodes=[]\nall_nodes = exec_command_line_command('source ' + source_rc_file_path + 'stackrc;openstack server list -f json')['JsonOutput']\nall_nodes = [{'Name': item['name'], 'ip': item['networks'].split('=')[-1]} for item in all_nodes]\nfor node in all_nodes:\n if check_ping(node['ip']) is True:\n nodes.append(node)\n else:\n print_in_color('Warning - ' + str(node) + ' will be skipped, due to connectivity issue!!!', 'yellow')\n\n\n### Create Result Folders ###\nif result_dir in 
os.listdir('.'):\n    shutil.rmtree(result_dir)\nos.mkdir(result_dir)\n\n\nclass LogTool(unittest.TestCase):\n    @staticmethod\n    def raise_warning(msg):\n        warnings.warn(message=msg, category=Warning)\n\n    @staticmethod\n    def run_on_node(node):\n        print('-------------------------')\n        print(node)\n        print('--------------------------')\n        print('\\n' + '-' * 40 + 'Remote Overcloud Node -->', str(node) + '-' * 40)\n        result_file = node['Name'].replace(' ', '') + '.log'\n        s = SSH(node['ip'], user=overcloud_ssh_user, key_path=overcloud_ssh_key)\n        s.ssh_connect_key()\n        s.scp_upload('Extract_On_Node.py', overcloud_home_dir + 'Extract_On_Node.py')\n        s.ssh_command('chmod 777 ' + overcloud_home_dir + 'Extract_On_Node.py')\n        command = \"sudo \" + overcloud_home_dir + \"Extract_On_Node.py '\" + str(\n            user_start_time) + \"' \" + overcloud_logs_dir + \" '\" + grep_string + \"'\" + ' ' + result_file + ' ' + save_raw_data+' None '+log_type\n        print('Executed command on host --> ', command)\n        com_result = s.ssh_command(command)\n        print(com_result['Stdout'])  # Do not delete me!!!\n        if 'SUCCESS!!!' in com_result['Stdout']:\n            print_in_color(str(node) + ' --> OK', 'green')\n            workers_output[str(node)]=com_result['Stdout'].splitlines()[-2]\n            competed_nodes[node['Name']] = True\n        else:\n            print_in_color(str(node) + ' --> FAILED', 'yellow')\n            LogTool.raise_warning(str(node) + ' --> FAILED')  # run_on_node is a staticmethod, so call the helper via the class\n            errors_on_execution[node['Name']] = False\n        s.scp_download(overcloud_home_dir + result_file, os.path.join(os.path.abspath(result_dir), result_file+'.gz'))\n        # Clean all #\n        files_to_delete = ['Extract_On_Node.py', result_file]\n        for fil in files_to_delete:\n            s.ssh_command('rm -rf ' + fil)\n        s.ssh_close()\n\n    \"\"\" Start LogTool and export errors from Overcloud; execution on the nodes runs in parallel \"\"\"\n    def test_1_Export_Overcloud_Errors(self):\n        print('\\ntest_1_Export_Overcloud_Errors')\n        mode_start_time = time.time()\n        threads=[]\n        for node in nodes:\n            t=threading.Thread(target=self.run_on_node, args=(node,))\n            threads.append(t)\n            t.start()\n        for t in threads:\n            t.join()\n        script_end_time = time.time()\n        if len(errors_on_execution) == 0:\n            spec_print(['Completed!!!', 'Result Directory: ' + result_dir,\n                        'Execution Time: ' + str(script_end_time - mode_start_time) + '[sec]'], 'green')\n        else:\n            if len(errors_on_execution)==len(nodes):\n                spec_print(['Execution has failed for all nodes :-( ',\n                            'Execution Time: ' + str(script_end_time - mode_start_time) + '[sec]'],'red')\n            else:\n                spec_print(['Completed with failures!!!', 'Result Directory: ' + result_dir,\n                            'Execution Time: ' + str(script_end_time - mode_start_time) + '[sec]',\n                            'Failed nodes:'] + [k for k in list(errors_on_execution.keys())], 'yellow')\n        if len(competed_nodes)==0:\n            self.raise_warning('LogTool execution has failed on all Overcloud nodes :-(')\n\n\n    \"\"\" Start LogTool and export errors from Undercloud \"\"\"\n    def test_2_Export_Undercloud_Errors(self):\n        print('\\ntest_2_Export_Undercloud_Errors')\n        mode_start_time = time.time()\n        result_file = 'Undercloud.log'\n        log_root_dir=str(undercloud_logs)\n        command = \"sudo python3 Extract_On_Node.py '\" + str(user_start_time) + \"' \" + \"'\" + log_root_dir + \"'\" + \" '\" + grep_string + \"'\" + ' ' + result_file\n        com_result=exec_command_line_command(command)\n        shutil.move(result_file+'.gz', os.path.join(os.path.abspath(result_dir),result_file+'.gz'))\n        end_time=time.time()\n        if com_result['ReturnCode']==0:\n            spec_print(['Completed!!!','Result Directory: '+result_dir,'Execution Time: 
'+str(end_time-mode_start_time)+'[sec]'],'green')\n            workers_output['UndercloudNode'] = com_result['CommandOutput'].splitlines()[-2]\n        else:\n            spec_print(['Failed!!!', 'Result Directory: ' + result_dir,\n                        'Execution Time: ' + str(end_time - mode_start_time) + '[sec]'], 'red')\n        if com_result['ReturnCode']!=0:\n            self.raise_warning('LogTool execution has failed on Undercloud logs :-(')\n\n    \"\"\" This test will create a final report. The report file will be created only when ERRORs have been detected.\n        The report file will be used as an indication for ansible to PASS or FAIL; in case of failure it will \"cat\" its\n        content.\n    \"\"\"\n    def test_3_create_final_report(self):\n        print('\\ntest_3_create_final_report')\n        report_file_name = 'LogTool_Report.log'\n        if report_file_name in os.listdir('.'):\n            os.remove(report_file_name)\n        report_data=''\n\n        for key in workers_output:\n            if 'Total_Number_Of_Errors:0' not in workers_output[key]:\n                report_data+='\\n'+key+' --> '+workers_output[key]\n        if len(report_data)!=0:\n            append_to_file(report_file_name,report_data+\n                           '\\n\\nFor more details, check LogTool result files on your setup:'\n                           '\\n'+os.path.abspath(result_dir))\n","repo_name":"zahlabut/LogTool","sub_path":"Plugin_For_Infrared_Python3/LogTool_Plugin.py","file_name":"LogTool_Plugin.py","file_ext":"py","file_size_in_byte":6867,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"37"} +{"seq_id":"38512117502","text":"#!/usr/bin/env python3\n\nfrom csv import DictWriter\nimport json\nimport os\n\n# Write govuk-local-transactions-updated.csv using data from the artefacts\n# directory. The slug is taken from the file name and all other data is taken\n# from the artefact itself. The three tier columns added here were not included\n# in the original spreadsheet from which govuk-local-transactions.csv was\n# exported.\n\n\nALL_TIERS = ['district', 'county', 'unitary']\nARTEFACT_DIRECTORY = 'artefacts'\n\n\ndef build_row(slug, data):\n    row = {'slug': slug}\n\n    row['title'] = data['title']\n    row['LGSL'] = data['details']['lgsl_code']\n    row['LGIL'] = data['details']['lgil_override']\n\n    tiers = data['details']['local_service']['providing_tier']\n    for tier in ALL_TIERS:\n        row[tier] = bool(tier in tiers)\n\n    return row\n\n\nwith open('govuk-local-transactions-updated.csv', 'w') as f:\n    fields = ['slug', 'title', 'LGSL', 'LGIL'] + ALL_TIERS\n    writer = DictWriter(f, fields)\n    writer.writeheader()\n\n    rows = []\n    for artefact in os.listdir(ARTEFACT_DIRECTORY):\n        slug = os.path.splitext(artefact)[0]\n        with open(os.path.join(ARTEFACT_DIRECTORY, artefact), 'r') as artefact_file:\n            data = json.load(artefact_file)\n        rows.append(build_row(slug, data))\n\n    rows.sort(key=lambda row: row['slug'])\n    writer.writerows(rows)\n","repo_name":"jennyd/govuk-local-interactions","sub_path":"update_local_transactions_csv.py","file_name":"update_local_transactions_csv.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"29568045103","text":"import sys\n\nimport requests\nimport json\n\n#url = 'https://vestec.epcc.ed.ac.uk/EDI/WFAHotspot'\nurl = 'http://localhost:8000/EDI/WFAHotspot'\n\nif len(sys.argv) < 3:\n    print(\"Error, you must provide the incidentID and hotspot file as command line arguments\")\n    sys.exit(-1)\n\nhotspot_file=open(sys.argv[2], \"r\")\nread_bytes=hotspot_file.read()\nhotspot_file.close()\n\nmsg = {\"incidentID\": sys.argv[1], \"payload\" : 
read_bytes}\n\nx = requests.post(url+\"-\"+sys.argv[1], data = json.dumps(msg))\nprint(x.text)\n","repo_name":"VESTEC-EU/vestec-system","sub_path":"WorkflowManager/workflows/wildfire/inject_dummy_hotspot_data.py","file_name":"inject_dummy_hotspot_data.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"70278688749","text":"import os, glob, codecs\n\n#________________________________________\ndef has_BOM(File):\n    f = open(File, 'rb'); b = f.read(len(codecs.BOM_UTF8)); f.close()\n    return b == codecs.BOM_UTF8\n\n#________________________________________\ndef del_BOM(File):\n    if has_BOM(File):\n        f = open(File, 'rb'); f.read(len(codecs.BOM_UTF8)); c = f.read(); f.close()\n        f = open(File, 'wb'); f.write(c); f.close()\n        return True\n    return False\n\n#________________________________________\ndef add_BOM(File):\n    if not has_BOM(File):\n        f = open(File, 'rb'); c = f.read(); f.close()\n        f = open(File, 'wb'); f.write(codecs.BOM_UTF8); f.write(c); f.close()\n        return True\n    return False\n\n#________________________________________\ndef is_ANSI(File):\n    f = codecs.open(File, 'r', encoding = 'utf8')\n    try:\n        f.read(); f.close()\n        return False #; print('is already utf-8 conform: ' + os.path.split(File)[1])\n    except UnicodeDecodeError:\n        f.close()\n        return True #; print('is not utf-8 conform: ' + os.path.split(File)[1])\n\n#________________________________________\ndef Convert_To_UTF8(File):\n    if is_ANSI(File):\n        with open(File, 'r') as f:\n            c = f.read(); f.close()\n        with codecs.open(File, 'w', encoding = 'utf8') as f:\n            f.write(c); f.close()\n        return True\n    return False\n\n#________________________________________\ndef Convert_Files(Root, Extensions = ['cpp', 'c', 'h', 'hpp']):\n    n = 0\n    for Extension in Extensions:\n        Files = glob.glob('{:s}/**/*.{:s}'.format(Root, Extension), recursive=True)\n        for File in Files:\n            # if has_BOM(File):\n            #     print(File.replace(Root, '.'))\n            # if add_BOM(File):\n            #     print(File.replace(Root, '.'))\n            #     n += 1\n            # if del_BOM(File):\n            #     print(File.replace(Root, '.'))\n            #     n += 1\n\n            # if is_ANSI(File):\n            #     print(File.replace(Root, '.'))\n            if Convert_To_UTF8(File):\n                print(File.replace(Root, '.'))\n                n += 1\n    print('{:d} replacement(s)'.format(n))\n    return True\n\n#________________________________________\nConvert_Files('F:/develop/saga/saga-code/master/saga-gis/src')\n","repo_name":"saga-gis/saga-gis","sub_path":"saga-gis/src/accessories/helper/convert2utf-8.py","file_name":"convert2utf-8.py","file_ext":"py","file_size_in_byte":2251,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"37"} +{"seq_id":"32625574425","text":"import pycrfsuite as pcrf\nimport os\nimport cv2\nimport pickle\nimport numpy as np\nimport crf\n\ntests = os.listdir(os.getcwd()+'/tests')\ntest_images = [cv2.imread('tests/'+filename, 0) for filename in tests]\n\n# pickle files must be opened in binary mode\ntest_segs = [pickle.load(open('test_segs/'+filename[:-4]+'.pkl','rb')) for filename in tests]\n\nx_test = [crf.img2features(img, seg) for img, seg in zip(test_images, test_segs)]\n\ntagger = pcrf.Tagger()\ntagger.open('building_area.crfsuite')\n\ny_preds = [tagger.tag(test) for test in x_test]\nfor i in range(len(y_preds)):\n\th,w = test_images[i].shape\n\tres = np.zeros((h,w), dtype = np.uint8)\n\tfor x in range(h):\n\t\tfor y in range(w):\n\t\t\tres[x,y] = int(y_preds[i][x*w+y])\n\tcv2.imwrite('crf_results/'+tests[i], res)\n\tcv2.imshow(\"i\", 
res)\n\tcv2.waitKey(0)\ncv2.destroyAllWindows()","repo_name":"ron-debajyoti/IP_Area_Extraction","sub_path":"tag_crf.py","file_name":"tag_crf.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39951843225","text":"from rest_framework import serializers\nfrom events import models\n\nclass EventListSerializer(serializers.ModelSerializer):\n\n author = serializers.StringRelatedField()\n\n class Meta:\n model = models.Event\n fields = (\"id\", \"title\", \"content_preview\", \"author\", \"create_date\", \"start_date\", \"end_date\")\n","repo_name":"Rom4eg/myCompany","sub_path":"myCompany/events/serializers/events_list.py","file_name":"events_list.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28124470147","text":"# this file will scrape platinum motor's website to\n# get the rental car's available and all their details\n\n# import necessary libraries\nimport bs4\nfrom bs4 import BeautifulSoup\nimport requests\nimport re\n\n# get the car names\ndef getCarNames(rawInput):\n # all results\n allResults = []\n # iterate over each row of car\n for row in rawInput:\n # apply regex to get the name\n result = re.sub(r'<.*?>', '', str(row))\n result = re.sub(r'(\\t){5,}', ' ', result)\n result = re.sub(r'^( )|( )$', '', result)\n allResults.append(result)\n return allResults\n\n\n# get the car images\ndef getCarImages(rawInput):\n # all results\n allResults = []\n # iterate over each row\n for row in rawInput:\n # apply regex to get the src\n result = re.findall(r'src=\".*\"', str(row))[0]\n result = re.sub(r'src=\"', '', result)\n result = re.sub(r'\"', '', result)\n allResults.append(result)\n return allResults\n\n# get the car details\ndef getCarDetails(rawInput):\n # all results\n allResults = []\n # iterate over each row\n for row in rawInput:\n # apply regex to get the details\n result = re.sub(r'<.*?>', '', str(row))\n result = re.sub(r',', '', result)\n result = re.findall(r'[0-9.+,+]+', result)\n # append all numbers to string and separate by pipes |\n finalRow = ''\n for x in range(0, len(result)):\n finalRow += result[x] + '|'\n allResults.append(finalRow)\n return allResults\n\n\n# --------------------- BEGIN MAIN DRIVER CODE HERE -------------------\n# get the response\nresponse = requests.get('https://platinummotorcars.com/exotic-car-rentals-dallas.htm')\n\n# get the page data\nsoup = BeautifulSoup(response.content, 'html.parser')\n\n# get the car names\ncarNames = soup.findAll(\"span\", {\"class\": \"item-name\"})\nparsedCarNames = getCarNames(carNames)\n\n# get the car images\ncarImages = soup.findAll(\"img\", {\"class\": \"img-responsive\"})\nparsedCarImages = getCarImages(carImages)\n\n# get the car details\ncarDetails = soup.findAll(\"div\", {\"class\": \"col-sm-7 col-xs-12\"})\nparsedCarDetails = getCarDetails(carDetails)\n\n# format output\n# pipe | will separate attributes for cars\n# newline '\\n' will separate each row of cars\nwith open('out.txt', 'w') as f:\n for x in range(0, len(parsedCarNames)):\n f.write(str(parsedCarNames[x]) + '|' + str(parsedCarImages[x]) + '|' + str(parsedCarDetails[x]) + 
'\\n')\n","repo_name":"jeremybrachle/SeniorDesign","sub_path":"seniorDesignPM/src/assets/files/pmScrapeManual.py","file_name":"pmScrapeManual.py","file_ext":"py","file_size_in_byte":2443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14346852266","text":"import numpy as np\n\n# Hamming distance problem\ndef calculate_hamming_distance(string1, string2):\n    hamming_dist = 0\n    try:\n        for i in range(0,len(string1)):\n            if string1[i] != string2[i]:\n                hamming_dist += 1\n        return hamming_dist\n    except IndexError:\n        print('String1 and string2 do not have the same length')\n\n\n# Neighborhood of a string\n'''\n    pattern - sequence\n    d - hamming distance max.\n    Output: The collection of strings Neighbors(Pattern, d)\n    \n'''\n\n# By recursion\ndef Neighbors(pattern, d):\n    nucleotides = {'A','C','G','T'}\n    if d == 0:\n        return [pattern]\n    if len(pattern)==1:\n        return nucleotides\n    neighborhood = []\n    suffixNeighbors = Neighbors(pattern[1:],d)\n    for text in suffixNeighbors:\n        if calculate_hamming_distance(pattern[1:],text) int:\n        count = 0\n        for i in patterns:\n            l = len(i)\n            for j in range(len(word)):\n                if i == word[j:j+l]:\n                    count += 1\n                    break\n        return count\n        \n    ","repo_name":"Atul-Verma-Git/100-days-of-code","sub_path":"number-of-strings-that-appear-as-substrings-in-word/number-of-strings-that-appear-as-substrings-in-word.py","file_name":"number-of-strings-that-appear-as-substrings-in-word.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36197501780","text":"import os\nimport sys\nfrom flasgger import Swagger\nfrom nameko.standalone.rpc import ClusterRpcProxy\nfrom flask import Flask, jsonify, request\nfrom gevent.pywsgi import WSGIServer\nfrom gevent import monkey\n\nmonkey.patch_all()\n\ncur_path = os.path.abspath(sys.argv[0])\nwork_space = os.sep.join(cur_path.split(os.sep)[:-2])\nsys.path.append(work_space)\n\napp = Flask(__name__)\napp.config['JSON_SORT_KEYS'] = False\nCONFIG = {'AMQP_URI': \"pyamqp://guest:guest@localhost\"}\nSwagger(app)\n\n\n@app.route('/hello', methods=['GET'])\ndef get_apis():\n    return jsonify({\"web_state\": \"success\"})\n\n\n@app.route('/hello_world', methods=['GET'])\ndef hello():\n    \"\"\"\n    Micro Service for hello, say hello to you.\n    Note: this is a 'say hello' endpoint.\n    ---\n    parameters:\n      - in: query\n        name: name\n        required: true\n        description: your name.\n        schema:\n          type : string\n          example: jessica\n    responses:\n      200:\n        description: OK\n    \"\"\"\n    name = request.json[\"name\"]\n    with ClusterRpcProxy(CONFIG) as rpc:\n        result = rpc.nameko_service_1.hello(name=name)\n    return result, 200\n\n\n@app.route('/hello_world', methods=['POST'])\ndef hello_people():\n    \"\"\"\n    Micro Service for hello_people\n    Note: this is a 'say how are you' endpoint.\n    ---\n    parameters:\n      - name: body\n        in: body\n        required: true\n        schema:\n          id: data\n          properties:\n            name:\n              type: string\n    responses:\n      200:\n        description: say how are you to you.\n    \"\"\"\n    name = request.json[\"name\"]\n    with ClusterRpcProxy(CONFIG) as rpc:\n        result = rpc.nameko_service_2.hello(name=name)\n    return result, 200\n\n\nif __name__ == '__main__':\n    app.config['JSON_AS_ASCII'] = False\n    port = 8001\n    http_server = WSGIServer(('0.0.0.0', port), app)\n    
http_server.serve_forever()\n","repo_name":"sunjianzhou/nameko_test","sub_path":"nameko_example_two/nameko_test.py","file_name":"nameko_test.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"37"} +{"seq_id":"15355298387","text":"\"\"\"\n-- coding: utf-8 --\n@project:ILP_PROJECT\n@File:config.py\n@IDE:PyCharm\n@Author:fangyx\n@Date:2023/8/13 22:39\n\"\"\"\n\nimport os\n\n\n# implicit wait timeout (seconds)\nIMPLICTLY_WAIT_TIMEOUT = 5\n\n# host\nTEST_HOST = \"http://192.168.32.70:2888/\"\nUAT_HOST = \"http://192.168.32.70:2888/\"\n\n# get the project root directory (the parent of this file's parent directory)\nROOT_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n# report output path\nREPORTS_PATH = os.path.join(ROOT_PATH, \"reports\")\n\n# image_path: path for saving screenshots\nIMAGE_PATH = os.path.join(REPORTS_PATH, \"screeshots\")\n\n# path of the files used by the file-upload feature\nFILES_PATH = os.path.join(ROOT_PATH, \"files\")\n\n# parent directory of the element-locator files\nELEMENT_PATH = os.path.join(ROOT_PATH,'locators')\n\n# test-data directory\nTESTDATA_PATH = os.path.join(ROOT_PATH,'data')\n","repo_name":"Zlv399/ILP_PROJECT","sub_path":"config/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31037653979","text":"from datetime import datetime\nfrom ftplib import FTP\nimport time\nimport os\n\nftp = FTP(\"63.33.239.182\")\n\ndef connect():\n    ftp.login(\"pi\", \"pi\")\n    ftp.cwd(\"files\")\n    ftp.set_pasv(False)\n\ndef getMs(dateTime):\n    return datetime.strptime(dateTime,'%d.%m.%Y %H:%M:%S').timestamp() * 1000\n\ndef current():\n    return round(time.time() * 1000)\n\ndef check():\n    try:\n        files = ftp.nlst()\n        currentTime = current()\n        \n        for i in files:\n            # print(i)\n            modifiedTime = ftp.sendcmd('MDTM ' + i)[4:].strip()\n            splitted = list(modifiedTime) #20210816121029\n            dateString = splitted[6] + splitted[7]+ \".\" + splitted[4] + splitted[5] + \".\" + splitted[0] + splitted[1] + splitted[2] + splitted[3]+ \" \"+ splitted[8] + splitted[9]+ \":\"+ splitted[10] + splitted[11]+ \":\"+ splitted[12] + splitted[13]\n\n            diff = currentTime - getMs(dateString) # 10805199 10877240.0\n            \n            if diff < 12890832:\n                if not os.path.isfile('./' + i):\n                    ftp.retrbinary(\"RETR \" + i, open(i, 'wb').write)\n                    print(\"FILE TO DOWNLOAD:\")\n                    print(i)\n    except Exception:\n        # on any failure, reconnect and retry (note: the recursive retry is unbounded)\n        connect()\n        check()\n\nwhile True:\n    check()\n    time.sleep(1)","repo_name":"emre-h/cam-server","sub_path":"ftp-checker.py","file_name":"ftp-checker.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20137803662","text":"import os\nimport glob\n\nroach_pixels_all = [[0,1,2,3],[4,5,6,7],[8,9,10,11],[12,13,14,15]]\n\ndef lookup_roach_files(obsnum,\n                       roach_list=['roach0', 'roach1', 'roach2', 'roach3'],\n                       path='/data_lmt/spectrometer/',\n                       debug=False):\n    \"\"\"\n    Returns a tuple of the roach files which match a particular obsnum \n    and the number of those files.\n    Args:\n        obsnum (int): target observation number\n        roach_list (list): list of the directories of roach files\n        path (str): path to the roach directories\n        debug (boolean): if True, print more information\n    Returns:\n        (filenames (list), result (int)) : list of file names, number \n        of files found\n    \"\"\"\n    nroach = len(roach_list)\n    filenames = []\n    result = 0\n    for roach in roach_list:\n        spec_filenames = glob.glob(os.path.join(path, roach, \n                                   '%s_%d_*.nc' % (roach, obsnum)))\n        for filename in 
spec_filenames:\n if debug:\n print('found %s' % (filename))\n if not 'allantest' in filename:\n if debug:\n print('append %s' % (filename))\n filenames.append(filename)\n result = result + 1\n if filenames == []:\n if debug:\n print('lookup_roach_files: no files for obsnum', obsnum)\n return (filenames, result)\n\n\ndef find_roach_from_pixel(pixel_id):\n \"\"\"\n Returns roach number on which target pixel is located.\n Args:\n pixel_id (int): target pixel number\n Returns:\n i (int): roach number on which target pixel is located\n \"\"\"\n for i, lis in enumerate(roach_pixels_all):\n if pixel_id in lis:\n return [i]\n return []\n\ndef create_roach_list(pixel_list):\n \"\"\"\n Returns list of roach boards to be read given a list of pixels.\n Args:\n pixel_list (list): list of target pixels\n Returns:\n roach_list (list): list of roach boards to be read\n \"\"\"\n rid = [0, 0, 0, 0]\n for pixel_id in pixel_list:\n r = find_roach_from_pixel(pixel_id)\n if r != []:\n rid[r[0]] = 1\n roach_list = []\n for i in range(4):\n if rid[i] == 1:\n roach_list.append('roach%d' % (i))\n return roach_list\n","repo_name":"teuben/SpectralLineReduction","sub_path":"lmtslr/utils/roach_file_utils.py","file_name":"roach_file_utils.py","file_ext":"py","file_size_in_byte":2349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"37675250499","text":"from typing import *\nfrom math import *\n\nclass Solution(object):\n def myPow(self, x, n):\n \"\"\"\n :type x: float\n :type n: int\n :rtype: float\n \"\"\"\n if n == 0:\n return 1.0\n\n neg, n = n < 0, abs(n)\n\n res = 1.0\n x2 = x\n while n > 0:\n if n & 1 == 1:\n res *= x2\n x2 *= x2\n n >>= 1\n\n return 1.0 / res if neg else res\n\n def myPow1(self, x, n):\n \"\"\"\n :type x: float\n :type n: int\n :rtype: float\n \"\"\"\n if n == 0:\n return 1.0\n neg, n = n < 0, abs(n)\n\n def _pow(a, m):\n if m == 1:\n return a\n odd, m = m & 1, m >> 1\n ans = _pow(a, m)\n ans *= ans\n if odd:\n ans *= a\n return ans\n\n res = _pow(x, n)\n return 1.0 / res if neg else res\n\nsol = Solution()\ndata = 2.00000, 10 # 1024.00000\n# data = 2.10000, 3 # 9.26100\n# data = 2.00000, -2 # 0.25000\nprint(data)\nres = sol.myPow(*data)\nprint(res)\n","repo_name":"calfzhou/just-coding","sub_path":"50-myPow/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39526586508","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\n progressBars for functions (could use on client side)\n\"\"\"\nimport math\nfrom progressbar import AnimatedMarker, Bar, Counter, ETA, \\\n Percentage, Widget, ProgressBar, Timer\n\n\nclass Speed(Widget):\n FORMAT = '%6.2f %s/s'\n PREFIXES = 'okMGTPEZY'\n\n def update(self, pbar):\n \"\"\"Updates the widget with the current SI prefixed speed.\"\"\"\n\n if pbar.seconds_elapsed < 2e-6 or pbar.currval < 2e-6: # =~ 0\n scaled = power = 0\n else:\n speed = pbar.currval / pbar.seconds_elapsed\n power = int(math.log(speed, 1000))\n scaled = speed / 1000.**power\n\n return self.FORMAT % (scaled, self.PREFIXES[power])\n# http://www.artima.com/weblogs/viewpost.jsp?thread=240845\n# class example(object):\n# # def wrapped(*args):\n# # f(args)\n# # return wrapped\n# def __init__(self, f):\n# self.f = f\n# def __call__(self, *args):\n# \"\"\"\n# The __call__ method is not called until the\n# decorated function is called.\n# \"\"\"\n# print \"Inside __call__()\"\n# self.f(*args)\n# @example\n\n\ndef 
together(interval=10000):\n things = [AnimatedMarker(), \" \", Counter(), \"/{} \".format(interval),\n Percentage(), ' ', Speed(), ' ', Bar(), ' ', Timer(), ' ', ETA()]\n pbar = ProgressBar(widgets=things, maxval=interval).start()\n for i in range(interval):\n pbar.update(i + 1)\n pbar.finish()\n# together(100000000)\n","repo_name":"codetriage-readme-bot/congredi","sub_path":"congredi/utils/progress.py","file_name":"progress.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6258094985","text":"\"\"\"\"\nSettings:\n pos_id\n second_key\n client_id\n client_secret\n\"\"\"\nimport json\nimport logging\nimport os\nfrom urllib.parse import urljoin\n\nimport requests\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse, reverse_lazy\nfrom django_fsm import can_proceed\n\nfrom getpaid.post_forms import PaymentHiddenInputsPostForm\nfrom getpaid.processor import BaseProcessor\nfrom getpaid.status import PaymentStatus as ps\n\nlogger = logging.getLogger(__name__)\n\n\nclass PaymentProcessor(BaseProcessor):\n slug = \"dummy\"\n display_name = \"Dummy\"\n accepted_currencies = [\n \"PLN\",\n \"EUR\",\n ]\n ok_statuses = [200]\n method = \"REST\" # Supported modes: REST, POST, GET\n confirmation_method = \"PUSH\" # PUSH or PULL\n post_form_class = PaymentHiddenInputsPostForm\n post_template_name = \"dummy/payment_post_form.html\"\n _token = None\n standard_url = reverse_lazy(\"paywall:gateway\")\n api_url = reverse_lazy(\"paywall:api_register\")\n\n def get_paywall_method(self):\n return self.get_setting(\"paywall_method\", self.method)\n\n def get_confirmation_method(self):\n return self.get_setting(\"confirmation_method\", self.confirmation_method).upper()\n\n def get_paywall_baseurl(self, request=None, **kwargs):\n if request is None:\n base = os.environ.get(\"_PAYWALL_URL\")\n else:\n base = os.environ[\"_PAYWALL_URL\"] = request.build_absolute_uri(\"/\")\n if self.get_paywall_method() == \"REST\":\n return urljoin(base, str(self.api_url))\n return urljoin(base, str(self.standard_url))\n\n def get_params(self):\n base = self.get_paywall_baseurl()\n params = {\n \"ext_id\": self.payment.id,\n \"value\": self.payment.amount_required,\n \"currency\": self.payment.currency,\n \"description\": self.payment.description,\n \"success_url\": urljoin(\n base,\n reverse(\"getpaid:payment-success\", kwargs={\"pk\": str(self.payment.pk)}),\n ),\n \"failure_url\": urljoin(\n base,\n reverse(\"getpaid:payment-failure\", kwargs={\"pk\": str(self.payment.pk)}),\n ),\n }\n if self.get_confirmation_method() == \"PUSH\":\n params[\"callback\"] = urljoin(\n base, reverse(\"getpaid:callback\", kwargs={\"pk\": str(self.payment.pk)})\n )\n return {k: str(v) for k, v in params.items()}\n\n # Specifics\n def prepare_transaction(self, request, view=None, **kwargs):\n target_url = self.get_paywall_baseurl(request)\n params = self.get_params()\n method = self.get_paywall_method()\n if method == \"REST\":\n response = requests.post(target_url, json=params)\n if response.status_code in self.ok_statuses:\n self.payment.confirm_prepared()\n self.payment.save()\n return HttpResponseRedirect(response.json()[\"url\"])\n elif method == \"POST\":\n self.payment.confirm_prepared()\n self.payment.save()\n form = self.get_form(params)\n return TemplateResponse(\n request=request,\n template=self.get_template_names(view=view),\n 
context={\"form\": form, \"paywall_url\": target_url},\n )\n else:\n # GET payments are a bit tricky. You can either confirm payment as\n # prepared here, or on successful return from paywall.\n self.payment.confirm_prepared()\n self.payment.save()\n return HttpResponseRedirect(target_url)\n\n def handle_paywall_callback(self, request, **kwargs):\n new_status = json.loads(request.body).get(\"new_status\")\n if new_status is None:\n raise ValueError(\"Got no status\")\n elif new_status == ps.FAILED:\n self.payment.fail()\n elif new_status == ps.PRE_AUTH:\n self.payment.confirm_lock()\n elif new_status == ps.PAID:\n if can_proceed(self.payment.confirm_lock): # GET flow needs this\n self.payment.confirm_lock()\n if can_proceed(self.payment.confirm_payment):\n self.payment.confirm_payment()\n if can_proceed(self.payment.mark_as_paid):\n self.payment.mark_as_paid()\n elif can_proceed(self.payment.mark_as_refunded):\n self.payment.mark_as_refunded()\n else:\n raise ValueError(f\"Unhandled new status {new_status}\")\n self.payment.save()\n return HttpResponse(\"OK\")\n\n def fetch_payment_status(self, **kwargs):\n base = self.get_paywall_baseurl()\n response = requests.get(\n urljoin(\n base,\n reverse(\n \"paywall:get_status\", kwargs={\"pk\": str(self.payment.external_id)}\n ),\n )\n )\n if response.status_code not in self.ok_statuses:\n raise Exception(\"Error occurred!\")\n status = response.json()[\"payment_status\"]\n results = {}\n if status == ps.PAID:\n results[\"callback\"] = \"confirm_payment\"\n elif status == ps.PRE_AUTH:\n results[\"callback\"] = \"confirm_lock\"\n elif status == ps.PREPARED:\n results[\"callback\"] = \"confirm_prepared\"\n elif status == ps.FAILED:\n results[\"callback\"] = \"fail\"\n return results\n\n def charge(self, amount=None, **kwargs):\n url = urljoin(self.get_paywall_baseurl(), reverse(\"paywall:api_operate\"))\n requests.post(\n url, json={\"id\": str(self.payment.external_id), \"new_status\": ps.PAID}\n )\n\n def release_lock(self, **kwargs):\n url = urljoin(self.get_paywall_baseurl(), reverse(\"paywall:api_operate\"))\n requests.post(\n url, json={\"id\": str(self.payment.external_id), \"new_status\": ps.REFUNDED}\n )\n\n def start_refund(self, amount=None, **kwargs):\n url = urljoin(self.get_paywall_baseurl(), reverse(\"paywall:api_operate\"))\n requests.post(\n url,\n json={\n \"id\": str(self.payment.external_id),\n \"new_status\": ps.REFUND_STARTED,\n },\n )\n\n def cancel_refund(self, **kwargs):\n url = urljoin(self.get_paywall_baseurl(), reverse(\"paywall:api_operate\"))\n requests.post(\n url, json={\"id\": str(self.payment.external_id), \"new_status\": ps.PAID}\n )\n","repo_name":"django-getpaid/django-getpaid","sub_path":"getpaid/backends/dummy/processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":6559,"program_lang":"python","lang":"en","doc_type":"code","stars":438,"dataset":"github-code","pt":"37"} +{"seq_id":"27721793289","text":"# preparing data (cleaning raw data, aggregating and saving to file)\n\n# importing python libraries and opening settings\nimport os\nimport sys\nimport shutil\nimport logging\nimport logging.handlers as handlers\nimport json\nimport datetime\nimport numpy as np\nimport pandas as pd\nimport itertools as it\nimport tensorflow as tf\nfrom tensorflow.keras import preprocessing\n\n# open local settings and change local_scrip_settings if metaheuristic equals True\nwith open('./settings.json') as local_json_file:\n local_script_settings = json.loads(local_json_file.read())\n 
local_json_file.close()\n\n# import custom libraries\nsys.path.insert(1, local_script_settings['custom_library_path'])\nfrom k_fold_data_creator import k_fold_builder\n\nif local_script_settings['metaheuristic_optimization'] == \"True\":\n with open(''.join([local_script_settings['metaheuristics_path'],\n 'organic_settings.json'])) as local_json_file:\n local_script_settings = json.loads(local_json_file.read())\n local_json_file.close()\n\n# log setup\ncurrent_script_name = os.path.basename(__file__).split('.')[0]\nlog_path_filename = ''.join([local_script_settings['log_path'], current_script_name, '.log'])\nlogging.basicConfig(filename=log_path_filename, level=logging.INFO,\n format='%(asctime)s %(levelname)s %(name)s %(message)s')\nlogger = logging.getLogger(__name__)\nlogHandler = handlers.RotatingFileHandler(log_path_filename, maxBytes=10485760, backupCount=5)\nlogger.addHandler(logHandler)\nlogger.info('_prepare_data module start')\n\n# Random seed fixed\nnp.random.seed(1)\n\n# functions definitions\n\n\ndef prepare():\n print('\\n~prepare_data module~')\n # check if clean is done\n if local_script_settings['data_cleaning_done'] == \"True\":\n print('datasets already cleaned, based in settings info')\n logger.info(''.join(['\\n', datetime.datetime.now().strftime(\"%d.%b %Y %H:%M:%S\"),\n ' raw datasets already cleaned']))\n if local_script_settings['repeat_data_cleaning'] == \"False\":\n print('skipping prepare_data cleaning, as settings indicates')\n return True\n else:\n print('repeating data cleaning again')\n logger.info(''.join(['\\n', datetime.datetime.now().strftime(\"%d.%b %Y %H:%M:%S\"),\n ' cleaning raw datasets']))\n\n # pre-processing core\n try:\n # define filepaths\n raw_data_path = local_script_settings['raw_data_path']\n label_0_raw_data_path = ''.join([raw_data_path, local_script_settings['class_0_folder']])\n label_1_raw_data_path = ''.join([raw_data_path, local_script_settings['class_1_folder']])\n label_0_raw_data_evaluation_path = ''.join([raw_data_path, local_script_settings['class_0_evaluation_folder']])\n label_1_raw_data_evaluation_path = ''.join([raw_data_path, local_script_settings['class_1_evaluation_folder']])\n\n # extract files\n if not os.path.isfile(''.join([local_script_settings['raw_data_path'], 'images_localization.txt'])):\n images_label_0 = [''.join([label_0_raw_data_path, filename])\n for filename in os.listdir(label_0_raw_data_path)]\n images_label_1 = [''.join([label_1_raw_data_path, filename])\n for filename in os.listdir(label_1_raw_data_path)]\n evaluation_images_label_0 = [''.join([label_0_raw_data_evaluation_path, filename])\n for filename in os.listdir(label_0_raw_data_evaluation_path)]\n evaluation_images_label_1 = [''.join([label_1_raw_data_evaluation_path, filename])\n for filename in os.listdir(label_1_raw_data_evaluation_path)]\n\n images_loc = ','.join(images_label_0 + images_label_1)\n evaluation_images_loc = ','.join(evaluation_images_label_0 + evaluation_images_label_1)\n\n # save\n with open(''.join([local_script_settings['raw_data_path'], 'images_localization.txt']), 'w') as f:\n f.write(images_loc)\n f.close()\n images_loc = images_loc.split(',')\n with open(''.join([local_script_settings['raw_data_path'], 'evaluation_images_localization.txt']), 'w') as f:\n f.write(evaluation_images_loc)\n f.close()\n evaluation_images_loc = evaluation_images_loc.split(',')\n else:\n with open(''.join([local_script_settings['raw_data_path'], 'images_localization.txt'])) as f:\n chain = f.read()\n images_loc = chain.split(',')\n f.close()\n 
with open(''.join([local_script_settings['raw_data_path'], 'evaluation_images_localization.txt'])) as f:\n                chain = f.read()\n                evaluation_images_loc = chain.split(',')\n                f.close()\n        nof_images = len(images_loc)\n        eval_nof_images = len(evaluation_images_loc)\n        print('total jpg images found for training:', nof_images)\n        print('total jpg images found for evaluation:', eval_nof_images)\n\n        # open raw_data and disaggregation\n        # format of training_metadata: [id_number, label, group, filename, filepath]\n        id_number = 0\n        nof_groups = local_script_settings['nof_K_fold_groups']\n        training_metadata = []\n        print('first pre-processing step: disaggregation')\n        if local_script_settings['disaggregation_done'] == \"False\":\n            # train dataset\n            for image_path in images_loc:\n                filename = image_path.split('/')[-1]\n                # train_data_path_template = local_script_settings['raw_data_path']\n\n                # K fold disaggregation\n                k_fold_instance = k_fold_builder()\n                group = int(k_fold_instance.assign(id_number, nof_groups))\n\n                # detecting the label by folder or filename\n                if 'cat' in image_path:\n                    label = 'cat'\n                    # train_data_path = ''.join([train_data_path_template, 'label_0_'])\n                elif 'dog' in image_path:\n                    label = 'dog'\n                    # train_data_path = ''.join([train_data_path_template, 'label_1_'])\n                else:\n                    print('label not understood')\n                    return False\n\n                train_data_path_filename = image_path\n                training_metadata.append([id_number, label, group, filename, train_data_path_filename])\n                id_number += 1\n\n            # validation dataset\n            id_number = 0\n            evaluation_metadata = []\n            for image_path in evaluation_images_loc:\n                filename = image_path.split('/')[-1]\n\n                # detecting the label by folder or filename\n                if 'cat' in image_path:\n                    label = 'cat'\n                elif 'dog' in image_path:\n                    label = 'dog'\n                else:\n                    print('label not understood')\n                    return False\n\n                evaluation_data_path_filename = image_path\n                evaluation_metadata.append([id_number, label, filename, evaluation_data_path_filename])\n                id_number += 1\n\n            # save clean metadata source for use in subsequent training\n            training_metadata_df = pd.DataFrame(training_metadata)\n            training_metadata_df.to_csv(''.join([local_script_settings['clean_data_path'],\n                                                 'training_metadata.csv']), index=False, header=None)\n            training_metadata_df.to_csv(''.join([local_script_settings['train_data_path'],\n                                                 'training_metadata.csv']), index=False, header=None)\n            evaluation_metadata_df = pd.DataFrame(evaluation_metadata)\n            evaluation_metadata_df.to_csv(''.join([local_script_settings['models_evaluation_path'],\n                                                   'evaluation_metadata.csv']), index=False, header=None)\n            np.save(''.join([local_script_settings['clean_data_path'], 'training_metadata_np']),\n                    training_metadata)\n            np.save(''.join([local_script_settings['clean_data_path'], 'evaluation_metadata_np']),\n                    evaluation_metadata)\n            print('train and evaluation data -and their metadata- saved to file')\n            logger.info(''.join(['\\n', datetime.datetime.now().strftime(\"%d.%b %Y %H:%M:%S\"),\n                                 ' successfully saved training data and corresponding metadata']))\n            with open('./settings.json', 'w', encoding='utf-8') as local_wr_json_file:\n                local_script_settings['disaggregation_done'] = \"True\"\n                json.dump(local_script_settings, local_wr_json_file, ensure_ascii=False, indent=2)\n                local_wr_json_file.close()\n            print('data disaggregation was done')\n        elif local_script_settings['disaggregation_done'] == \"True\":\n            print('data 
disaggregation was done previously')\n else:\n print('settings disaggregation not understood')\n return False\n\n # data general_mean based - scaling\n # this step is automatically done in train by ImageDataGenerator\n print('data scaling was correctly prepared')\n\n # data normalization based in moving window\n # this step is included as a pre-processing_function in ImageDataGenerator\n print('data normalization was also prepared as a pre-processing_function (on the fly)')\n\n # save clean metadata source for use in subsequent training\n # if local_script_settings['disaggregation_done'] == \"False\":\n # training_metadata_df = pd.DataFrame(training_metadata)\n # column_names = ['id_number', 'label', 'quality_factor', 'group', 'filename', 'filepath']\n # training_metadata_df.to_csv(''.join([local_script_settings['clean_data_path'],\n # 'training_metadata.csv']), index=False, header=column_names)\n # np.save(''.join([local_script_settings['clean_data_path'], 'training_metadata_np']),\n # training_metadata)\n # np.savetxt(''.join([local_script_settings['clean_data_path'], 'training_metadata_np_to.csv']),\n # training_metadata, fmt='%10.15f', delimiter=',', newline='\\n')\n # print('train data -and their metadata- saved to file')\n # logger.info(''.join(['\\n', datetime.datetime.now().strftime(\"%d.%b %Y %H:%M:%S\"),\n # ' successful saved training data and correspondent metadata']))\n except Exception as e1:\n print('Error at pre-processing raw data')\n print(e1)\n logger.info(''.join(['\\n', datetime.datetime.now().strftime(\"%d.%b %Y %H:%M:%S\"),\n ' data pre-processing error']))\n logger.error(str(e1), exc_info=True)\n return False\n\n # save settings\n try:\n if local_script_settings['metaheuristic_optimization'] == \"False\":\n with open('./settings.json', 'w', encoding='utf-8') as local_wr_json_file:\n local_script_settings['data_cleaning_done'] = \"True\"\n json.dump(local_script_settings, local_wr_json_file, ensure_ascii=False, indent=2)\n local_wr_json_file.close()\n elif local_script_settings['metaheuristic_optimization'] == \"True\":\n with open(''.join([local_script_settings['metaheuristics_path'],\n 'organic_settings.json']), 'w', encoding='utf-8') as local_wr_json_file:\n local_script_settings['data_cleaning_done'] = \"True\"\n json.dump(local_script_settings, local_wr_json_file, ensure_ascii=False, indent=2)\n local_wr_json_file.close()\n logger.info(''.join(['\\n', datetime.datetime.now().strftime(\"%d.%b %Y %H:%M:%S\"),\n ' settings modified and saved']))\n print(\"raw datasets cleaned, settings saved..\")\n except Exception as e1:\n print('Error saving settings')\n print(e1)\n logger.error(str(e1), exc_info=True)\n\n # back to main code\n return True\n","repo_name":"pedroMoya/cat_vs_dog_2020","sub_path":"_1_prepare_data.py","file_name":"_1_prepare_data.py","file_ext":"py","file_size_in_byte":12504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8354667383","text":"\"\"\"\n Class definition of SpringRank.\n\"\"\"\n\n\nimport sparse\nimport warnings\nimport numpy as np\nimport scipy.sparse\nimport scipy.sparse.linalg\n\nfrom scipy.optimize import brentq\nfrom compute_metrics import save_metrics\n\nfrom tools import delta_scores\n\nclass SpringRank(object):\n\n def __init__(self, N=100, L=1, gamma=0., l0=1., l1=1., solver='bicgstab', a=0.01, b=20.,\n inf=1e10, verbose=0, force_dense=False, shift_rank=False, get_beta=True,\n out_inference=False, out_folder='../data/output/', in_folder=None, label='',\n cv=False, 
gt=False, **kwargs):\n\n        self.N = N  # number of nodes\n        self.L = L  # number of layers\n        self.gamma = gamma  # regularization penalty - spring constant for the fictitious i <-> origin connections\n        self.l0 = l0  # resting length for the fictitious i <-> origin connections\n        self.l1 = l1  # resting length for the i <-> j connections\n        self.inf = inf  # infinity value\n        self.force_dense = force_dense  # flag for forcing the algorithm to use dense matrices\n        self.shift_rank = shift_rank  # flag for shifting ranks to positive\n        self.get_beta = get_beta  # flag for inferring the inverse temperature parameter\n        self.out_inference = out_inference  # flag for storing the inferred parameters\n        self.out_folder = out_folder  # path for storing the output\n        self.in_folder = in_folder  # path for reading the labels\n        self.label = label  # additional label for the output\n        self.cv = cv  # flag for including cv metrics in the output\n        self.gt = gt  # flag for including metrics wrt ground truth in the output\n\n        if solver not in {'spsolve', 'bicgstab'}:  # solver used for the linear system\n            warnings.warn(f'Unknown parameter {solver} for argument solver. Setting solver = \"bicgstab\"')\n            solver = 'bicgstab'\n        self.solver = solver\n        if a < 0 or a > b:  # beta search interval\n            raise ValueError(\n                'The [a,b] interval for beta is not valid! a must be positive and b must be greater than a.')\n        self.a = a\n        self.b = b\n        if not isinstance(verbose, int) or verbose not in {0, 1, 2}:  # verbosity indicator\n            raise ValueError('The verbosity parameter can only assume values in {0,1,2}!')\n        self.verbose = verbose\n\n        if self.verbose == 2:\n            print(f'Using scipy.sparse.linalg.{self.solver}(A,B)')\n\n    def fit(self, data, mask=None):\n        \"\"\"\n        Model directed networks by assuming the existence of a hierarchical structure.\n        The ranking scores (unidimensional embeddings) are inferred by solving a linear system.\n\n        Parameters\n        ----------\n        data : ndarray/spmatrix\n            Has to be 2-dimensional and square.\n\n        Returns\n        -------\n        rank : ndarray\n            Array of ranks. Indices represent the nodes' indices used in the input matrix.\n        \"\"\"\n\n        if self.L > 2:\n            raise NotImplementedError('SpringRank for tensors not implemented! Use 2-dimensional input.')\n\n        if len(data.shape) > 2:\n            data = data[0]\n        if mask is not None and len(mask.shape) > 2:\n            mask = mask[0]\n\n        # check if input is sparse or can be converted to sparse.\n        use_sparse = True\n        if not self.force_dense and not scipy.sparse.issparse(data):\n            try:\n                data = scipy.sparse.csr_matrix(data)\n            except Exception:\n                warnings.warn('The input parameter A could not be converted to scipy.sparse.csr_matrix. 
'\n                              'Using a dense representation.')\n                use_sparse = False\n        elif self.force_dense:\n            use_sparse = False\n\n        # build array to feed linear system solver\n        if use_sparse:\n            A, B = self._build_from_sparse(data)\n        else:\n            A, B = self._build_from_dense(data)\n\n        rank = self._solve_linear_system(A, B)\n\n        if self.shift_rank:\n            rank = shift_rank(rank)\n        self.s = rank\n\n        self.beta, self.c = None, None\n        if self.get_beta:\n            self.beta = self._get_optimal_temperature(data)\n            self.c = self._get_sparsity_coefficient(data)\n\n        if self.out_inference:\n            self._output_results(mask = mask)\n\n        return self.s, self.beta, self.c\n\n    def _build_from_dense(self, data):\n        \"\"\"\n        Given as input a 2d numpy array, build the matrices A and B to feed to the linear system solver for SpringRank.\n        \"\"\"\n\n        k_in = np.sum(data, 0)\n        k_out = np.sum(data, 1)\n\n        D1 = k_in + k_out  # to be seen as diagonal matrix, stored as 1d array\n        D2 = self.l1 * (k_out - k_in)  # to be seen as diagonal matrix, stored as 1d array\n\n        if self.gamma != 0.:\n            B = np.ones(self.N) * (self.gamma * self.l0) + D2\n            A = - (data + data.T)\n            A[np.arange(self.N), np.arange(self.N)] = self.gamma + D1 + np.diagonal(A)\n        else:\n            last_row_plus_col = (data[self.N - 1, :] + data[:, self.N - 1]).reshape((1, self.N))\n            A = data + data.T\n            A += last_row_plus_col\n            A = -A  # off-diagonal terms enter with a minus sign, as in the sparse branch\n\n            A[np.arange(self.N), np.arange(self.N)] = A.diagonal() + D1\n            D3 = np.ones(self.N) * (\n                    self.l1 * (k_out[self.N - 1] - k_in[self.N - 1]))  # to be seen as diagonal matrix, stored as 1d array\n            B = D2 + D3\n\n        return scipy.sparse.csr_matrix(A), B\n\n    def _build_from_sparse(self, data):\n        \"\"\"\n        Given as input a sparse 2d scipy array, build the matrices A and B to feed to the linear system solver for\n        SpringRank.\n        \"\"\"\n\n        k_in = np.sum(data, 0).A1  # convert matrix of shape (1, n) into 1-dimensional array\n        k_out = np.sum(data, 1).A1  # same with (n, 1) matrix\n\n        D1 = k_in + k_out  # to be seen as diagonal matrix, stored as 1d array\n        D2 = self.l1 * (k_out - k_in)  # to be seen as diagonal matrix, stored as 1d array\n\n        if self.gamma != 0.:\n            B = np.ones(self.N) * (self.gamma * self.l0) + D2\n            A = - (data + data.T)\n            # convert to lil matrix for more efficient computations\n            A = A.tolil(copy = False)\n            A.setdiag(self.gamma + D1 + A.diagonal())\n        else:\n            last_row_plus_col = sparse.COO.from_scipy_sparse(\n                data[self.N - 1, :] + data[:, self.N - 1].T)  # create sparse 1d COO array\n            A = data + data.T\n            A += last_row_plus_col  # broadcast on rows\n            A = -A.tocsr()  # reconvert to csr scipy matrix\n\n            # Notice that a scipy.sparse.SparseEfficiencyWarning will be raised by calling A.setdiag().\n            # However converting to lil matrix with\n            # A.tolil(copy=False)\n            # is not computationally convenient. 
Just suppress the warning during the call of A.setdiag(...)\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", scipy.sparse.SparseEfficiencyWarning)\n A.setdiag(A.diagonal() + D1)\n\n D3 = np.ones(self.N) * (self.l1 * (\n k_out[self.N - 1] - k_in[self.N - 1])) # to be seen as diagonal matrix, stored as 1d array\n B = D2 + D3\n return A, B\n\n def _solve_linear_system(self, A, B):\n\n if self.solver == 'spsolve':\n sol = scipy.sparse.linalg.spsolve(A, B)\n elif self.solver == 'bicgstab':\n sol = scipy.sparse.linalg.bicgstab(A, B, tol = 1e-08, atol = 'legacy')[0]\n return sol.reshape((-1,))\n\n def _get_optimal_temperature(self, A):\n if eq_beta(self.a, self.s, A) * eq_beta(self.b, self.s, A) > 0:\n if self.verbose == 2:\n print(f'Beta update computed in the interval [0,+inf) instead of [0,{self.b}].')\n self.b = self.inf\n return brentq(eq_beta, self.a, self.b, args = (self.s, A))\n\n def _get_sparsity_coefficient(self, A):\n A = A.todense()\n H = - 0.5 * np.power(delta_scores(A.shape[0], self.s), 2) * self.beta\n return A[A != 0].sum() / np.exp(H)[A != 0].sum()\n\n def _output_results(self, mask=None):\n \"\"\"\n Output results in a compressed file.\n Parameters\n ----------\n nodes : list\n List of nodes IDs.\n \"\"\"\n\n # saving s and nodes sorted by s values\n nodes = np.argsort(self.s)[::-1]\n output_parameters = self.out_folder + 'parameters_' + self.label + '_SR'\n\n np.savez_compressed(output_parameters + '.npz', s = self.s, beta = self.beta, c = self.c, nodes = nodes)\n if self.in_folder is not None:\n out = {'s': self.s, 'beta': self.beta, 'c': self.c, 'gamma': self.gamma, 'nodes_s': nodes}\n out_metrics = self.out_folder + 'metrics_' + self.label + '_SR'\n label_path = self.in_folder\n save_metrics(out, label_path, out_metrics, model = 'SR', mask = np.logical_not(mask)[None, :, :] if mask is not None else None,\n cv = self.cv, ground_truth = self.gt)\n\n if self.verbose == 2:\n print()\n print(f'Parameters saved in: {output_parameters}.npz')\n print('To load: theta=np.load(filename), then e.g. 
theta[\"s\"]')\n            if self.in_folder is not None:\n                print(f'Metrics saved in: {out_metrics}.csv')\n                print('Load as a pandas dataframe.', end = '\\n\\n')\n\n\ndef eq_beta(beta, s, A):\n    # optimal beta wrt conditional likelihood (eq.S39)\n    N = np.shape(A)[0]\n    x = 0\n    for i in range(N):\n        for j in range(N):\n            if A[i, j] == 0:\n                continue\n            else:\n                x += (s[i] - s[j]) * (A[i, j] - (A[i, j] + A[j, i]) / (1 + np.exp(-2 * beta * (s[i] - s[j]))))\n    return x\n\n\ndef shift_rank(ranks):\n    \"\"\"\n    Shifts all scores by a translation, so that the minimum is zero\n    and the others are all positive\n    \"\"\"\n\n    min_r = min(ranks)\n    N = len(ranks)\n    for i in range(N):\n        ranks[i] = ranks[i] - min_r\n    return ranks\n","repo_name":"liacov/XOR-rankcom","sub_path":"src/modules/SpringRank.py","file_name":"SpringRank.py","file_ext":"py","file_size_in_byte":10171,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"641557746","text":"# -*- coding: utf-8 -*-\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, SubmitField, TextAreaField, BooleanField, FieldList, FormField, IntegerField, PasswordField, SelectField, TextField, DateField\nfrom wtforms.validators import ValidationError, DataRequired\nfrom wtforms.widgets import TextArea\nfrom .models import Organizations, Vpn_users\n\ndef validate_ip(s):\n    a = s.split('.')\n    if len(a) != 4:\n        return False\n    for x in a:\n        if not x.isdigit():\n            return False\n        i = int(x)\n        if i < 0 or i > 255:\n            return False\n    return True\n\n\ndef validate_mask(s):\n    if len(s) > 2:\n        return False\n    for x in s:\n        if not x.isdigit():\n            return False\n    i = int(s)\n    if i < 16 or i > 32:\n        return False\n    return True\n\n\ndef adres_check(form, field):\n    if '/' not in field.data:\n        raise ValidationError('Отсутствует разделитель адреса и маски')\n    sp_ip = field.data.split('\\r\\n')\n    for ips in sp_ip:\n        # split the mask off the address\n        ip_addr, mask = ips.split('/')\n        if not (validate_ip(ip_addr) and validate_mask(mask)):\n            raise ValidationError('Ошибка в ип адресе')\n\n\ndef adres_vpn_check(form, field):\n    sp_ip = Vpn_users.query.filter_by(adres_vpn=field.data).first()\n    if sp_ip is not None:\n        raise ValidationError('Такой адрес уже есть для другого клиента')\n\n\nclass LoginForm(FlaskForm):\n    username = StringField(\"Username\", validators=[DataRequired()])\n    password = PasswordField(\"Password\", validators=[DataRequired()])\n    remember = BooleanField(\"Remember Me\")\n    submit = SubmitField()\n\n\nclass AdminUsersForm(FlaskForm):\n    id_user = StringField('User ID')\n    name_user = StringField('User Name')\n    select_user = BooleanField('Selected user', default=\"unchecked\")\n\n\nclass CreateAdminUserForm(FlaskForm):\n    new_login = StringField('Login')\n    new_pass = PasswordField('Пароль:')\n    new_confirm_pass = PasswordField('Подтверждение пароля')\n    user_list = FieldList(FormField(AdminUsersForm), min_entries=0)\n    edit_user = SubmitField(\"Редактировать выбранных\")\n    new_user = SubmitField(\"Добавить\")\n    delete_user = SubmitField(\"Удалить выбранных\")\n\n\nclass EditAdminUserForm(FlaskForm):\n    login = StringField('Login')\n    new_pass = PasswordField('Пароль:')\n    new_confirm_pass = PasswordField('Подтверждение пароля')\n    field_user_id = IntegerField('user_id')\n    save_user = SubmitField(\"Сохранить\")\n    cancel_user = SubmitField(\"Отменить\")\n\n\n\nclass VpnUsersForm(FlaskForm):\n    vpn_login = StringField('Имя пользователя')\n    vpn_organizations = StringField('Организация')\n    
allowedips_ip = TextField('IP адрес')\n allowedips_mask = StringField('Маска')\n adres_vpn = StringField('Адрес клиента')\n new_user = SubmitField(\"Новый пользователь\")\n edit_user = SubmitField(\"Редактировать пользователя\")\n delete_user = SubmitField(\"Удалить выбранных\")\n get_setting = SubmitField(\"Скачать настройки\")\n v_user = BooleanField('Visible user ')\n vpn_organizations_sel = SelectField('Организация', choices=[(row.id_organizations, row) for row in Organizations.query.all()])\n\nclass NewVpnUserForm(FlaskForm):\n new_vpn_login = StringField('Имя пользователя', validators=[DataRequired()])\n new_vpn_organizations = SelectField('Организация')\n email_vpn_users = StringField('E-mail адрес', validators=[DataRequired()])\n allowedips_ip = StringField('IP адрес')\n allowedips_mask = StringField('Маска')\n adres_vpn = StringField('Адрес клиента', validators=[adres_vpn_check])\n adres = TextAreaField('Список доступа:', validators=[adres_check])\n dt_activations = DateField('Дата активации пользователя')\n dt_disable_vpn_users = DateField('Дата отключения пользователя')\n now_active = BooleanField('Активировать', default=\"checked\")\n save_user = SubmitField(\"Сохранить пользователя\")\n cancel_user = SubmitField(\"Отменить\")\n\n\n\nclass EditVpnUserForm(FlaskForm):\n vpn_login = StringField('Имя пользователя', validators=[DataRequired()])\n edit_vpn_organizations = SelectField('Организация', validators=[DataRequired()], choices=[(row.id_organizations, row) for row in Organizations.query.all()])\n email_vpn_users = StringField('E-mail адрес', validators=[DataRequired()])\n allowedips_ip = StringField('IP адреса', widget=TextArea(), validators=[adres_check])\n adres_vpn = StringField('Адрес клиента')\n dt_disable_vpn_users = DateField('Дата отключения пользователя')\n save_user = SubmitField(\"Сохранить пользователя\")\n cancel_user = SubmitField(\"Отменить\")\n\nclass OrganizationsForm(FlaskForm):\n id_organizations = IntegerField('Id')\n name_organizations = StringField('Наименование организации')\n server_organizations = StringField('IP адрес сервера')\n port = StringField('Порт адрес сервера')\n subnet = StringField('Подсеть')\n public_vpn_key_organizations = StringField('Публичный ключ')\n private_vpn_key_organizations = StringField('Закрытый ключ')\n add_org = SubmitField(\"Добавить организацию\")\n del_org = SubmitField(\"Удалить выбранные организации\")\n\n\nclass Apple_hostsForm(FlaskForm):\n id_apple_hosts = IntegerField('Id')\n host_name = StringField('HostName')\n name_org = SelectField('Организация')\n id_org = IntegerField('Id Org')\n add_work_host = SubmitField(\"Добавить узел\")\n del_work_host = SubmitField(\"Удалить выбранный узел\")\n\n\nclass LogginViewForm(FlaskForm):\n id_login = IntegerField('Id')\n user_id = IntegerField('Id')\n admin_name = StringField('Имя администратора')\n descr = StringField('Событие')\n dt_event = DateField('Дата')","repo_name":"Rykovskov/wireguard1","sub_path":"app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":6550,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32893915378","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier\nimport pickle\n\n# Load the dataset\ndataset_path = 'Data/Fertilizer Prediction.csv'\ndata = pd.read_csv(dataset_path)\n\n# Rename columns\ndata.rename(columns={'Humidity ': 
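Illustrative sketch of the field-level validator pattern used in forms.py above, runnable with plain wtforms and no Flask app; the form class and data are invented.

from wtforms import Form, StringField, ValidationError

def ip_check(form, field):
    # reject anything that is not a dotted quad of 0..255
    parts = field.data.split('.')
    if len(parts) != 4 or not all(p.isdigit() and 0 <= int(p) <= 255 for p in parts):
        raise ValidationError('Invalid IPv4 address')

class DemoForm(Form):
    adres_vpn = StringField('Client address', validators=[ip_check])

print(DemoForm(adres_vpn='10.0.0.5').validate())   # True
bad = DemoForm(adres_vpn='10.0.0.999')
print(bad.validate(), bad.errors)                  # False {'adres_vpn': ['Invalid IPv4 address']}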
'Humidity', 'Soil Type': 'Soil_Type', 'Crop Type': 'Crop_Type', 'Fertilizer Name': 'Fertilizer'}, inplace=True)\n\n# Initialize LabelEncoder\nencode_soil = LabelEncoder()\nencode_crop = LabelEncoder()\n\n\n# Fit and transform the categorical features\ndata.Soil_Type = encode_soil.fit_transform(data.Soil_Type)\ndata.Crop_Type = encode_crop.fit_transform(data.Crop_Type)\n\n\n\n# Separate the features and labels\nfeatures = data[['Nitrogen', 'Potassium', 'Phosphorous', 'Temperature', 'Humidity', 'Moisture', 'Crop_Type', 'Soil_Type']]\nlabels = data['Fertilizer']\n\n# Split the dataset into training and testing sets\nX_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.2, random_state=42)\n\n# Create a Decision Tree classifier\nclassifier = DecisionTreeClassifier()\n\n# Train the model\nclassifier.fit(X_train, y_train)\n\n# Make predictions on the test set\npredictions = classifier.predict(X_test)\n\n# Calculate accuracy\naccuracy = classifier.score(X_test, y_test)\nprint(\"Accuracy:\", accuracy)\n\nmodel_path = 'utils/Fertilizer_Prediction.pkl'\n# Dump the model using pickle\nwith open(model_path, 'wb') as file:\n pickle.dump(classifier, file)\n","repo_name":"Harshvardhan-45/Crop-prediction","sub_path":"utils/Fertilizer_Prediction.py","file_name":"Fertilizer_Prediction.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1397802898","text":"from tkinter import filedialog\nfrom typing import List, Optional\n\nfrom threading import Thread, Event\n\nfrom tkinter import BOTTOM, Tk, RIGHT, BOTH, RAISED, CENTER, IntVar, LEFT, Canvas, E\nfrom tkinter.filedialog import FileDialog\nfrom tkinter.ttk import Frame, Button, Style, Scale, Label\nfrom PIL import ImageTk, Image\nfrom time import sleep\n\n\nclass AnimationThead(Thread):\n variable: IntVar\n running: bool = False\n rate: int = 1\n\n min: int\n max: int\n\n quit_event: Event\n\n def __init__(self, variable: IntVar, min: int, max: int, interval: float = 1.5):\n super().__init__()\n self.variable = variable\n self.interval = interval\n\n self.min = min\n self.max = max\n\n self.quit_event = Event()\n \n def run(self):\n try:\n while not self.quit_event.is_set():\n sleep(self.interval)\n value = self.variable.get()\n\n if value == self.max:\n self.rate = -1\n \n if value == self.min:\n self.rate = 1\n\n if self.running:\n self.variable.set(value+self.rate)\n except Exception:\n pass\n\n\nclass SolutionDisplay(Frame):\n N: int\n W: int\n H: int\n\n animation_thread: AnimationThead\n\n solutions: Optional[List[str]]\n images: List[Image.Image]\n\n scroll_var: IntVar\n\n #Labels\n progress_label: Label\n action_label: Label \n\n animate_button: Button\n\n def __init__(self, solutions: Optional[List[str]], images: List[Image.Image]):\n super().__init__()\n\n self.solutions = solutions\n self._pil_images = images\n self.images = [*map(ImageTk.PhotoImage, images)]\n\n self.W = self.images[0].width()\n self.H = self.images[0].height()\n\n self.animation_thread = None\n\n if self.solutions is not None:\n self.N = len(self.solutions)\n else:\n self.N = len(self.images)\n\n self.initUI()\n \n def quit(self):\n if self.animation_thread is not None:\n self.animation_thread.quit_event.set()\n \n def toggle_animation(self, *args):\n if self.animation_thread is None:\n self.animation_thread = AnimationThead(self.scroll_var, 1, self.N)\n self.animation_thread.start()\n\n if self.animation_thread is not None:\n val = 
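One pitfall in the training script above: the fitted LabelEncoders are not persisted, yet inference needs the same category-to-integer mapping that training used. A small sketch (toy soil categories) of pickling an encoder alongside the model:

import pickle
from sklearn.preprocessing import LabelEncoder

encode_soil = LabelEncoder().fit(['Black', 'Loamy', 'Sandy'])  # toy categories
with open('soil_encoder.pkl', 'wb') as f:
    pickle.dump(encode_soil, f)

with open('soil_encoder.pkl', 'rb') as f:
    restored = pickle.load(f)
print(restored.transform(['Loamy']))  # same integer code as at fit time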
self.animation_thread.running\n            val ^= True\n            self.animation_thread.running = val\n\n            if val:\n                self.animate_button.config(text=\"Stop\")\n            else:\n                self.animate_button.config(text=\"Animate\")\n    \n    def save(self, *action):\n        curr = self.scroll_var.get()\n        path = filedialog.asksaveasfilename(filetypes=[(\"PNG\", \".png\"), (\"JPEG\", \".jpg\")])\n        if path is not None and len(path) > 0:\n            self._pil_images[curr-1].save(path)\n    \n    def change_step(self, n):\n        self.progress_label.config(text=f\"{n}/{self.N}\")\n\n        if self.solutions is not None:\n            self.action_label.config(text=str(self.solutions[n-1]))\n\n        cw = self.canvas.winfo_width()\n        ch = self.canvas.winfo_height()\n        self.canvas.config(image=self.images[n-1])\n        pass\n\n    def trace_scroll(self, *args):\n        val = self.scroll_var.get()\n        self.change_step(val)\n    \n    def speed_event(self, change):\n        def callback(*args):\n            if self.animation_thread is None:\n                return\n\n            interval = self.animation_thread.interval\n            interval = max(min(interval + change*0.2, 3), .2)\n            self.animation_thread.interval = interval\n        \n        return callback\n    \n    def next_event(self, step):\n        def callback(*args):\n            st = self.scroll_var.get()\n            st += step\n            self.scroll_var.set(st)\n        \n        return callback\n\n    def initUI(self):\n\n        self.master.title(\"Buttons\")\n        self.style = Style()\n        self.style.theme_use(\"default\")\n\n        frame = Frame(self, relief=RAISED, borderwidth=1)\n        frame.pack(fill=BOTH, expand=True)\n\n        self.canvas = canvas = Label(frame)\n        canvas.pack(expand=1, anchor=CENTER)\n\n        self.pack(fill=BOTH, expand=True)\n\n        self.scroll_var = scroll_var = IntVar(value=1)\n        scroll_var.trace('w', self.trace_scroll)\n\n        buttons_frame = Frame(self, borderwidth=1)\n        buttons_frame.pack(side=RIGHT)\n        #Down: slow the animation (longer interval)\n        down_button = Button(buttons_frame, text=\"🠗\", width=2)\n        down_button.grid(row=0, column=0, padx=5, pady=5)\n        down_button.bind('<Button-1>', self.speed_event(1))\n        #Animate\n        self.animate_button = animate_button = Button(buttons_frame, text=\"Animate\")\n        animate_button.grid(row=0, column=1, sticky='news', padx=5, pady=5)\n        #Up: speed the animation up (shorter interval)\n        up_button = Button(buttons_frame, text=\"🠕\", width=2)\n        up_button.grid(row=0, column=2, padx=5, pady=5)\n        up_button.bind('<Button-1>', self.speed_event(-1))\n        #Left\n        left_button = Button(buttons_frame, text=\"🠔\", width=2)\n        left_button.grid(row=1, column=0, padx=5, pady=5)\n        left_button.bind('<Button-1>', self.next_event(-1))\n        #Save\n        save_button = Button(buttons_frame, text=\"Save\")\n        save_button.grid(row=1, column=1, sticky='news', padx=5, pady=5)\n        save_button.bind('<Button-1>', self.save)\n        #Right\n        right_button = Button(buttons_frame, text=\"🠖\", width=2)\n        right_button.grid(row=1, column=2, padx=5, pady=5)\n        right_button.bind('<Button-1>', self.next_event(1))\n\n        scale = Scale(self, variable = scroll_var, from_=1, to=self.N)\n        scale.pack(anchor=CENTER, padx=5, pady=5, fill=\"both\")\n\n        self.progress_label = progress_label = Label(self, text=\"1/10\")\n        progress_label.pack(side=LEFT, padx=5, pady=5, fill=\"none\")\n\n        self.action_label = action_label = Label(self, text=\"\")\n        action_label.pack(anchor=CENTER, padx=5, pady=5, fill=\"both\")\n\n        # Add animate button handler\n        animate_button.bind('<Button-1>', self.toggle_animation)\n\n        scroll_var.set(1)","repo_name":"hndregjoni/search_algorithms_hw","sub_path":"common/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":6173,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"15587630909","text":"#coding=utf-8\n\nimport math\nimport rospy\nfrom geometry_msgs.msg import Point\nfrom 
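A stripped-down sketch of the closure-based handlers wired up in initUI above: bind() takes an event sequence such as '<Button-1>' (left mouse click) plus a callback that receives the event object, and speed_event/next_event return callbacks capturing their step argument. Requires a display to actually enter the mainloop; the widget names are invented.

import tkinter as tk

def next_event(var, step):
    def callback(event=None):
        var.set(var.get() + step)  # step is captured by the closure
    return callback

root = tk.Tk()
counter = tk.IntVar(value=1)
btn = tk.Button(root, text='->')
btn.pack()
btn.bind('<Button-1>', next_event(counter, 1))
# root.mainloop()  # uncomment to run interactively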
visualization_msgs.msg import Marker\nimport msg_helpers\n\nclass LinesHelper:\n def __init__(self, topic, color):\n self.pub = rospy.Publisher(topic, Marker, queue_size = 1)\n self.marker = self.__init_marker(color)\n\n def __init_marker(self, rgba):\n marker = Marker()\n marker.type = Marker.LINE_LIST\n marker.header.frame_id = 'map'\n marker.scale.x = 0.01\n marker.scale.y = 0.01\n marker.scale.z = 0.01\n marker.color.r = rgba[0]\n marker.color.g = rgba[1]\n marker.color.b = rgba[2]\n marker.color.a = rgba[3]\n return marker\n\n def lines(self, lines):\n self.marker.points = []\n for line in lines:\n for i in range(len(line)-1):\n self.marker.points.append(msg_helpers.array_to_point(line[i]))\n self.marker.points.append(msg_helpers.array_to_point(line[i+1]))\n\n self.pub.publish(self.marker)\n","repo_name":"Garrus007/jetson_car","sub_path":"jetson_car/scripts/motion_planner/rviz_helpers.py","file_name":"rviz_helpers.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"19052794267","text":"import configparser\n\nRUN_INFO_VER = \"2020_7_14\"\n\n\nclass Run(object):\n def __init__(\n self, sensor, bias, file_name, temperature, cycle, dut_ch, trig_ch, cuts\n ):\n self.sensor = sensor\n self.bias = bias\n self.file_name = file_name\n self.temperature = temperature\n self.cycle = cycle\n self.dut_ch = dut_ch\n self.trig_ch = trig_ch\n self.cuts = cuts\n\n\nclass ConfigReader(object):\n @staticmethod\n def open(config=f\"run_info_v{RUN_INFO_VER}.ini\"):\n config_file = configparser.ConfigParser()\n config_file.read(config)\n\n sensor = config_file[\"header\"][\"sensor\"]\n\n dut_ch = config_file[\"header\"][\"dut_channel\"]\n trig_ch = config_file[\"header\"][\"trigger_channel\"]\n\n run_list = []\n\n header = {key: config_file[\"header\"][key] for key in config_file[\"header\"]}\n\n for run_num in range(int(config_file[\"header\"][\"number_of_runs\"])):\n\n fname = config_file[f\"run{run_num}\"][\"file_name\"]\n\n cycle = 1\n if \"root.\" in fname:\n cycle = int(fname.split(\"root.\")[1])\n\n bias = int(config_file[f\"run{run_num}\"][\"bias\"].split(\"V\")[0])\n try:\n temperature = config_file[f\"run{run_num}\"][\"temperature\"]\n except:\n temperature = -30\n\n raw_cut = config_file[f\"run{run_num}\"][f\"cut_{dut_ch}\"].split(\" \")\n\n dut_cut = \"tmax{dut_ch}[0]-cfd{trig_ch}[20] > {dut[0]} && tmax{dut_ch}[0]-cfd{trig_ch}[20] < {dut[1]} && pmax{dut_ch}[0] > {dut[2]} && pmax{dut_ch}[0] < {dut[3]}\".format(\n dut_ch=dut_ch, trig_ch=trig_ch, dut=raw_cut[:4]\n )\n\n trig_cut = \"tmax{trig_ch}[0]-cfd{trig_ch}[20] > {trig[0]} && tmax{trig_ch}[0]-cfd{trig_ch}[20] < {trig[1]} && pmax{trig_ch}[0] > {trig[2]} && pmax{trig_ch}[0] < {trig[3]}\".format(\n trig_ch=trig_ch, trig=raw_cut[4:8]\n )\n if len(raw_cut) == 8:\n cuts = f\"{dut_cut} && {trig_cut}\"\n elif len(raw_cut) == 9:\n cuts = f\"{dut_cut} && {trig_cut} && {raw_cut[8]}\"\n else:\n cuts = f\"{dut_cut} && {trig_cut}\"\n\n run_list.append(\n Run(sensor, bias, fname, temperature, cycle, dut_ch, trig_ch, cuts)\n )\n\n return (header, run_list)\n","repo_name":"neko-0/HGTD_BetaScope_FW_Test","sub_path":"scripts/betaScope_pyScript/result_parser/config_reader.py","file_name":"config_reader.py","file_ext":"py","file_size_in_byte":2359,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"72452021494","text":"class checkersBoard:\n def __init__ (self):\n self.grid = [[' ',1, 2, 3, 4, 5, 6, 7, 
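A toy ini file in the layout ConfigReader.open() above expects; the section and key names mirror the code, the values are invented.

import configparser

cfg = configparser.ConfigParser()
cfg.read_string('''
[header]
sensor = LGAD-W5
dut_channel = 2
trigger_channel = 3
number_of_runs = 1

[run0]
file_name = stats_run0.root.1
bias = 120V
temperature = -30
cut_2 = -2 2 50 700 -2 2 100 400
''')

# same parsing steps as ConfigReader.open()
fname = cfg['run0']['file_name']
cycle = int(fname.split('root.')[1]) if 'root.' in fname else 1
bias = int(cfg['run0']['bias'].split('V')[0])
raw_cut = cfg['run0']['cut_2'].split(' ')
print(cycle, bias, len(raw_cut))  # 1 120 8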
8],\n ['A',' ','b',' ','b',' ','b',' ','b'],\n ['B','b',' ','b',' ','b',' ','b',' '],\n ['C',' ','b',' ','b',' ','b',' ','b'],\n ['D',' ',' ',' ',' ',' ',' ',' ',' '],\n ['E',' ',' ',' ',' ',' ',' ',' ',' '],\n ['F','w',' ','w',' ','w',' ','w',' '],\n ['G',' ','w',' ','w',' ','w',' ','w'],\n ['H','w',' ','w',' ','w',' ','w',' '],\n ]\n self.rowdict = {'A':1,'B':2, 'C':3, 'D':4, 'E':5, 'F':6, 'G':7, 'H':8}\n self.coldict = {'1':1,'2':2, '3':3, '4':4, '5':5, '6':6, '7':7, '8':8}\n\n def displayBoard (self):\n for row in self.grid :\n # Build up the string for the row\n rowString = \"\"\n # Loop through the values in the row\n for value in row :\n # Add one value to the string\n rowString = rowString + str(value) + \" | \"\n # Print out the entire row\n print (rowString)\n print (\"-----------------------------------\")\n \n def moves (self, currpos, nextpos):\n currRow = currpos[0]\n currCol = currpos[1]\n row = self.rowdict.get(currRow)\n col = self.coldict.get(currCol)\n nextR = nextpos[0]\n nextC = nextpos[1]\n nextRow = self.rowdict.get(nextR)\n nextCol = self.coldict.get(nextC)\n self.grid[nextRow][nextCol] = self.grid[row][col]\n self.grid[row][col] = ' '\n return self.grid\n\n def capture (self, currpos, nextpos):\n currRow = currpos[0]\n currCol = currpos[1]\n row = self.rowdict.get(currRow)\n col = self.coldict.get(currCol)\n nextR = nextpos[0]\n nextC = nextpos[1]\n nextRow = self.rowdict.get(nextR)\n nextCol = self.coldict.get(nextC)\n if nextRow == (row+2) and nextCol == (col+2) :\n self.grid[row+1][col+1] = ' '\n elif nextRow == (row+2) and nextCol == (col-2) :\n self.grid[row+1][col-1] = ' '\n elif nextRow == (row-2) and nextCol == (col+2) :\n self.grid[row-1][col+1] = ' '\n elif nextRow == (row-2) and nextCol == (col-2) :\n self.grid[row-1][col-1] = ' '\n \n return self.grid\n \n def gameover (self):\n players_dict = {'w': 0, 'b': 0}\n for row in self.grid:\n for column in row:\n if column == 'w':\n players_dict['w'] = players_dict['w'] + 1\n elif column == 'b':\n players_dict['b'] = players_dict['b'] + 1\n\n if players_dict['w'] == 0 :\n b_wins = 'Player \"b\" wins!:) \\n Player \"w\" loses :('\n return b_wins\n elif players_dict['b'] == 0:\n w_wins = 'Player \"w\" wins!:) \\n Player \"b\" loses :('\n return w_wins\n else:\n return False\n\n def checkPositionW(self, currpos):\n currRow = currpos[0]\n currCol = currpos[1]\n row = self.rowdict.get(currRow)\n col = self.coldict.get(currCol)\n if self.grid[row][col] == 'w':\n return True\n else:\n return False\n\n def checkPositionB(self, currpos):\n currRow = currpos[0]\n currCol = currpos[1]\n row = self.rowdict.get(currRow)\n col = self.coldict.get(currCol)\n if self.grid[row][col] == 'b':\n return True\n else:\n return False\n \n\n\n\n#boardcheckers = checkersBoard()\n#boardcheckers.displayBoard()\n#boardcheckers.moves('F3', 'D5')\n#boardcheckers.capture('F3', 'D5')\n#boardcheckers.displayBoard()\n\ndef main():\n boardcheckers = checkersBoard()\n boardcheckers.displayBoard()\n total_moves = 1000 \n current_piece = ''\n next_move = ''\n user_input = ''\n boardcheckers.gameover()\n while total_moves <= 1000 and user_input != 'quit' and boardcheckers.gameover() == False:\n if total_moves % 2 == 0:\n print(\"Player 'w', it's your turn\" )\n current_piece = input('Type in the place of the piece that you want to move')\n boardcheckers.checkPositionW(current_piece)\n while boardcheckers.checkPositionW(current_piece) == False:\n print(\"The place you entered doesn't have you piece\")\n current_piece = input('Type in the place of the 
piece that you want to move')\n boardcheckers.checkPositionW(current_piece)\n next_move = input('Type in the place where you want to move the piece')\n boardcheckers.moves(current_piece, next_move)\n boardcheckers.capture(current_piece, next_move)\n\n boardcheckers.displayBoard()\n boardcheckers.gameover()\n \n else:\n print(\"Player 'b', it's your turn\" )\n current_piece = input('Type in the place of the piece that you want to move')\n boardcheckers.checkPositionB(current_piece)\n while boardcheckers.checkPositionB(current_piece) == False:\n print(\"The place you entered doesn't have your piece\")\n current_piece = input('Type in the place of the piece that you want to move')\n boardcheckers.checkPositionW(current_piece)\n next_move = input('Type in the place where you want to move the piece')\n boardcheckers.moves(current_piece, next_move)\n boardcheckers.capture(current_piece, next_move)\n boardcheckers.displayBoard()\n boardcheckers.gameover()\n \n user_input = input('Type \"pass\" to continue the game or \"quit\" to quit the game')\n total_moves = total_moves - 1\n if user_input == 'quit':\n print('The game has ended')\n if boardcheckers.gameover() != False:\n print(boardcheckers.gameover())\n\n\n\nmain()\n \n \n\n\n \n \n\n\n\n\n","repo_name":"myanh-03/Checkers","sub_path":"checkers.py","file_name":"checkers.py","file_ext":"py","file_size_in_byte":5993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6496506415","text":"import itertools\nimport timeit\nfrom operator import add, mul, sub, truediv\n\noperations = add, mul, sub, truediv\n\nnumber = '595347'\n\n\ndef checkio():\n def results(n):\n yield int(n)\n for i in range(1, len(n)):\n for x, y in itertools.product(results(n[:i]), results(n[i:])):\n yield from (op(x, y) for op in operations) if y else (x,)\n\n return 100 not in results(number)\n\n\nprint(timeit.timeit(checkio, number=10000))\n# def get_groups(data):\n# for split in product([True, False], repeat=5):\n# split_index = [i + 1 for i in range(5) if split[i]]\n# ranges = zip([0] + split_index, split_index + [6]) # list of (start,stop)\n# yield [int(data[i:j]) for i, j in ranges]\n# def checkio(data):\n# numbers_list = list(get_groups(data))\n# while numbers_list:\n# numbers = numbers_list.pop()\n# if len(numbers) == 1:\n# if numbers[0] == 100:\n# return False\n# else:\n# for i in range(len(numbers) - 1):\n# for func in [add, sub, mul, truediv]:\n# if numbers[i + 1] == 0 and func == truediv: # div by zero\n# continue\n# new_numbers = numbers[:]\n# new_numbers[i:i + 2] = [func(numbers[i], numbers[i + 1])]\n# numbers_list.append(new_numbers)\n# return True\n\n\n# These \"asserts\" using only for self-checking and not necessary for auto-testing\n# if __name__ == '__main__':\n# assert checkio('000000') == True, \"All zeros\"\n# assert checkio('707409') == True, \"You can not transform it to 100\"\n# assert checkio('595347') == False, \"(5 + ((9 / (3 / 34)) - 7)) = 100\"\n# assert checkio('271353') == False, \"(2 - (7 * (((1 / 3) - 5) * 3))) = 100\"\n","repo_name":"RomanRusyn/SSTests","sub_path":"Electronic Station/Mathematically Lucky Tickets/mission.py","file_name":"mission.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30438009534","text":"from django.urls import path\nfrom . 
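A hypothetical memoised variant of the results() generator inside checkio() above: caching the value set per digit substring avoids re-enumerating shared splits, which matters when the function is timed repeatedly with timeit.

import itertools
from functools import lru_cache
from operator import add, mul, sub, truediv

@lru_cache(maxsize=None)
def results(n):
    out = {int(n)}
    for i in range(1, len(n)):
        for x, y in itertools.product(results(n[:i]), results(n[i:])):
            if y:
                out.update(op(x, y) for op in (add, mul, sub, truediv))
            else:
                out.add(x)  # when y == 0, keep only x, as in the original
    return frozenset(out)

print(100 in results('595347'))  # True: e.g. 5 + (95 - ((3 + 4) - 7))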
import views\n\nurlpatterns=[\n    path('',views.apiOverview,name='api-overview'),\n    path('task-list',views.taskList,name='task-list'),\n    path('create-task',views.createTask,name='create-task'),\n    path('update-task/<str:pk>',views.updateTask,name='update-task'),\n    path('delete-task/<str:pk>',views.deleteTask,name='delete-task'),\n    path('remove-completed/',views.removeCompleted,name='remove-completed'),\n    path('clearall',views.clearAll,name='clearall')\n    \n]","repo_name":"sanjaii/todo-api","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"29650568524","text":"import numpy as np\nfrom icecream import ic\nimport pandas as pd\n\nfrom context.domains import Dataset\nfrom context.models import Model\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.ensemble import RandomForestClassifier\n\n\nclass TitanicModel(object):\n    model = Model()\n    dataset = Dataset()\n\n    def preprocess(self, train_fname, test_fname):\n        '''\n        1. convert each entity into an object.\n        2. train, test, validation(id, label) => object\n        3. garbage drop\n        4. split the values of the (name) column where signal and noise are mixed, then drop the noise\n        5. convert natural-language data into int values the CPU can process\n        '''\n        this = self.dataset\n        that = self.model\n        this.train = that.new_dframe(train_fname)\n        this.test = that.new_dframe(test_fname)\n        this.id = this.test['PassengerId']\n        this.label = this.train['Survived']\n        this.train.drop('Survived', axis=1, inplace=True)\n        this = self.drop_feature(this, 'Cabin', 'Parch', 'Ticket', 'SibSp')\n        self.df_info(this)\n\n        this = self.extract_title_from_name(this)\n        title_mapping = self.remove_duplicate(this)\n        this = self.name_nominal(this, title_mapping)\n        this = self.drop_feature(this, 'Name')\n        this = self.sex_nominal(this)\n        this = self.drop_feature(this, 'Sex')\n        this = self.embarked_nominal(this)\n        this = self.age_ratio(this)\n        this = self.drop_feature(this, 'Age')\n        this = self.fare_ratio(this)\n        this = self.drop_feature(this, 'Fare')\n\n        k_fold = self.create_k_fold()\n        acc = self.get_accuracy(this, k_fold)\n        ic(acc)\n\n        # self.print_this(this)\n        # ic(this.train.head(20))\n        # ic(this.test.head(20))\n\n        '''\n        this = self.pclass_ordinal(this)\n        '''\n        return this\n\n    @staticmethod\n    def df_info(this):\n        [print(f'{i.info()}') for i in [this.train, this.test]]\n\n    @staticmethod\n    def print_this(this):\n        print('*' * 100)\n        ic(f'1. Train 의 타입 : {type(this.train)}\\n')\n        ic(f'2. Train 의 컬럼 : {type(this.train.columns)}\\n')\n        print(f'3. Train 의 상위 3개 : {this.train.head(3)}\\n')\n        ic(f'4. Train 의 null의 개수 : {this.train.isnull().sum()}\\n')\n        ic(f'5. Test 의 타입 : {type(this.test)}\\n')\n        ic(f'6. Test 의 컬럼 : {this.test.columns}\\n')\n        ic(f'7. Test 의 상위 3개 : {this.test.head(3)}\\n')\n        ic(f'8. Test 의 null의 개수 : {this.test.isnull().sum()}\\n')\n        ic(f'9. id 의 타입 : {type(this.id)}\\n')\n        ic(f'10. id 의 상위 10개 : {this.id[:10]}\\n')\n        print('*' * 100)\n\n    @staticmethod\n    def create_train(this) -> object:\n        return this\n\n    @staticmethod\n    def drop_feature(this, *feature):\n        [i.drop(list(feature), axis=1, inplace=True) for i in [this.train, this.test]]\n        # https://www.geeksforgeeks.org/how-to-drop-one-or-multiple-columns-in-pandas-dataframe/\n        # multiple drop\n        return this\n\n    @staticmethod\n    def kwargs_sample(**kwargs) -> None:\n        [print(f'{key} is {value}') for key, value in kwargs.items()]\n\n    '''\n    Categorical vs. Quantitative\n    Cate -> nominal (name) vs. 
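Hypothetical sketch of the view side of the '<str:pk>' routes above; it needs a configured Django project to actually serve, and the response body is invented.

from django.http import JsonResponse

def updateTask(request, pk):
    # Django passes the path segment captured by <str:pk> as a keyword
    # argument, already converted to str
    return JsonResponse({'updated': pk})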
ordinal (order)\n    Quan -> interval (relative) vs. ratio (absolute)\n    '''\n\n    @staticmethod\n    def pclass_ordinal(this) -> object:\n        return this\n\n    @staticmethod\n    def extract_title_from_name(this):\n        combine = [this.train, this.test]\n        for dataset in combine:\n            dataset['Title'] = dataset.Name.str.extract('([A-Za-z]+)\\.', expand=False)\n        return this\n\n    @staticmethod\n    def remove_duplicate(this) -> {}:\n        a = set()\n        [a.update(set(dataset['Title'])) for dataset in [this.train, this.test]]\n        # print(a)\n        title_mapping = {'Mr': 1, 'Ms': 2, 'Mrs': 3, 'Master': 4, 'Royal': 5, 'Rare': 6}\n        '''\n        Royal (royalty) : ['Countess', 'Lady', 'Sir']\n        Rare : ['Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Jonkheer', 'Dona', 'Mme']\n        Mr : ['Mr', 'Mlle']\n        Ms : ['Miss', 'Ms']\n        Master : ['Master']\n        Mrs : ['Mrs']\n        '''\n        return title_mapping\n\n    @staticmethod\n    def name_nominal(this, title_mapping):\n        for these in [this.train, this.test]:\n            these['Title'].replace(['Countess', 'Lady', 'Sir'], 'Royal', inplace=True)\n            these['Title'].replace(\n                ['Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Jonkheer', 'Dona', 'Mme'], 'Rare', inplace=True)\n            these['Title'].replace(['Mlle'], 'Mr', inplace=True)\n            these['Title'].replace(['Miss'], 'Ms', inplace=True)\n            # Mr, Ms, Master, Mrs are unchanged.\n            these['Title'].fillna(0, inplace=True)\n            these['Title'] = these['Title'].map(title_mapping)\n        return this\n\n    @staticmethod\n    def age_ratio(this) -> object:\n        age_mapping = {'Unknown': 0, 'Baby': 1, 'Child': 2, 'Teenager': 3, 'Student': 4,\n                       'Young Adult': 5, 'Adult': 6, 'Senior': 7}\n        [these['Age'].fillna(-0.5, inplace=True) for these in [this.train, this.test]]\n        # Q: why assign -0.5 to the NaN values?\n        # A: the Unknown bin spans -1 ~ 0 (exclusive of -1, inclusive of 0 with right=True, the default), so assigning -1 would leave the value as NaN.\n        # etc.: a value of 0 would also fall inside the Unknown bin, so couldn't we just use 0?\n        bins = [-1, 0, 5, 12, 18, 24, 35, 60, np.inf] # make sure you understand this.\n        # each label covers the interval bins[i] ~ bins[i+1], so bins has one more entry than labels.\n        # labels = ['Unknown', 'Baby', 'Child', 'Teenager', 'Student', 'Young Adult', 'Adult', 'Senior']\n        # for these in [this.train, this.test]:\n        #     these['AgeGroup'] = pd.cut(these['Age'], bins=bins, labels=labels) # using pd.cut()\n        #     these['AgeGroup'] = these['AgeGroup'].map(age_mapping) # using map()\n\n        labels = [0, 1, 2, 3, 4, 5, 6, 7]\n        # labels = ['Unknown', 'Baby', 'Child', 'Teenager', 'Student', 'Young Adult', 'Adult', 'Senior']\n        for these in [this.train, this.test]:\n            these['AgeGroup'] = pd.cut(these['Age'], bins=bins, right=False, labels=labels) # using pd.cut()\n        ic(type(this.train['AgeGroup'][0]))\n        return this\n\n    @staticmethod\n    def sex_nominal(this) -> object:\n        gender_mapping = {'male': 0, 'female': 1}\n        for these in [this.train, this.test]:\n            these['Gender'] = these['Sex'].map(gender_mapping)\n        return this\n\n    @staticmethod\n    def embarked_nominal(this) -> object:\n        '''test has no nulls, so only train is processed.\n        \n        Null values are assigned following the overall distribution.\n        So, can we just send them to the most common of the three embarkation ports...?\n        It seems we could look at the Title and send them to the most common port within that Title group.\n        Rather than that, assign the value that best fits the distribution.\n        print(these['Embarked'].value_counts())\n        S 644, C 168, Q 77\n        S 270, C 102, Q 46'''\n        embarked_mapping = {'S': 1, 'C': 2, 'Q': 3}\n        this.train = this.train.fillna({'Embarked': 'S'})\n        for these in [this.train, this.test]:\n            these['Embarked'] = these['Embarked'].map(embarked_mapping)\n        return this\n\n    @staticmethod\n    def fare_ratio(this) -> object:\n        this.test['Fare'] = this.test['Fare'].fillna(1)\n        for these in [this.train, this.test]:\n            these['FareBand'] = pd.qcut(these['Fare'], 4, labels={1, 2, 3, 4})\n            # print(f'bins set by qcut 
{this.train[\"FareBand\"].head()}')\n bins = [-1, 8, 15, 31, np.inf]\n return this\n\n @staticmethod\n def create_k_fold() -> object:\n return KFold(n_splits=10, shuffle=True, random_state=0)\n\n @staticmethod\n def get_accuracy(this, k_fold):\n score = cross_val_score(RandomForestClassifier(), this.train, this.label,\n cv=k_fold, n_jobs=1, scoring='accuracy')\n return round(np.mean(score) * 100, 2)\n\n def learning(self, train_fname, test_fname):\n this = self.preprocess(train_fname, test_fname)\n k_fold = self.create_k_fold()\n ic(f'사이킷런 알고리즘 정확도: {self.get_accuracy(this, k_fold)}')\n self.submit(this)\n\n @staticmethod\n def submit(this):\n clf = RandomForestClassifier()\n clf.fit(this.train, this.label)\n prediction = clf.predict(this.test)\n pd.DataFrame({'PassengerId': this.id, 'Survived': prediction}).to_csv('./save/submission.csv', index=False)\n","repo_name":"MinhyeSim/titanic_pycharm","sub_path":"titanic/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8796,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"15465765348","text":"from player import HumanPlayer, RandomCompPlayer, GeniusComputerPlayer\r\nimport time\r\n\r\n\r\nclass TicTacToe:\r\n def __init__(self):\r\n self.board = [' ' for i in range(9)]\r\n self.current_winner = None\r\n\r\n def print_board(self):\r\n for row in [self.board[i*3: (i+1)*3] for i in range(3)]:\r\n print('| ' + '| '.join(row) + ' |')\r\n\r\n @staticmethod\r\n def print_board_nums():\r\n # 0 | 1 | 2 etc {tells us what number corresponds to what box}\r\n number_board = [\r\n [str(i) for i in range(j*3, (j+1)*3)] for j in range(3)]\r\n for row in number_board:\r\n print('| ' + '| '.join(row) + ' |')\r\n\r\n def available_moves(self):\r\n return [i for i, spot in enumerate(self.board) if spot == ' ']\r\n\r\n def empty_squares(self):\r\n return ' ' in self.board\r\n\r\n def num_empty_squares(self):\r\n return self.board.count(' ')\r\n\r\n def make_move(self, square, letter):\r\n # if valid, make the move\r\n if self.board[square] == ' ':\r\n self.board[square] = letter\r\n # print(type(square),type(letter))\r\n\r\n if self.winner(square, letter):\r\n self.current_winner = letter\r\n return True\r\n return False\r\n\r\n def winner(self, square, letter):\r\n # check row\r\n row_ind = square // 3\r\n row = self.board[row_ind*3: (row_ind + 1) * 3]\r\n if all([spot == letter for spot in row]):\r\n return True\r\n\r\n # check column\r\n col_ind = square % 3\r\n column = [self.board[col_ind + i*3] for i in range(3)]\r\n if all([spot == letter for spot in column]):\r\n return True\r\n\r\n # check diagonal\r\n if square % 2 == 0:\r\n diagonal1 = [self.board[i] for i in [0, 4, 8]]\r\n if all([spot == letter for spot in diagonal1]):\r\n return True\r\n\r\n diagonal2 = [self.board[i] for i in [2, 4, 6]]\r\n if all([spot == letter for spot in diagonal2]):\r\n return True\r\n return False\r\n\r\n\r\ndef play(game, x_player, o_player, print_game=True):\r\n if print_game:\r\n game.print_board_nums()\r\n\r\n letter = 'X' # starting letter\r\n # iterate while the game still has empty squares\r\n # ( we don't have to worry about winner because we'll just that which\r\n # breaks the loop)\r\n while game.empty_squares():\r\n # get the move from an appropriate player\r\n if letter == 'O':\r\n square = o_player.get_move(game)\r\n else:\r\n square = x_player.get_move(game)\r\n\r\n # let's define a function to make a move!\r\n if game.make_move(square, letter):\r\n if print_game:\r\n print(letter + 
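Stand-alone illustration (toy ages) of the binning in age_ratio() above: with right=False each bin is [lo, hi), so the -0.5 placeholder lands in the Unknown bin [-1, 0).

import numpy as np
import pandas as pd

bins = [-1, 0, 5, 12, 18, 24, 35, 60, np.inf]
labels = [0, 1, 2, 3, 4, 5, 6, 7]
ages = pd.Series([-0.5, 2, 16, 30, 71])
print(pd.cut(ages, bins=bins, right=False, labels=labels).tolist())
# [0, 1, 3, 5, 7]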
f' makes a move to the square {square}')\r\n game.print_board()\r\n print()\r\n\r\n if game.current_winner:\r\n if print_game:\r\n print(letter + ' wins!')\r\n return letter\r\n # after we make a move we need to alternate letters\r\n letter = 'O' if letter == 'X' else 'X'\r\n\r\n # pause\r\n time.sleep(0.5)\r\n\r\n if print_game:\r\n print('It\\'s a tie! ')\r\n\r\n\r\nif __name__ == '__main__':\r\n x = int(input(\r\n '''Which type of opponent do you want?\r\n1- novice AI \\n2- advanced AI \\n3- PvP \\n'''\r\n ))\r\n if x == 1:\r\n b = True\r\n while b is True:\r\n a = input('Choose if you want to go 1st(1) or 2nd(2): ')\r\n if a == '1':\r\n x_player = HumanPlayer('X')\r\n o_player = RandomCompPlayer('O')\r\n b = False\r\n elif a == '2':\r\n o_player = HumanPlayer('O')\r\n x_player = RandomCompPlayer('X')\r\n b = False\r\n else:\r\n print('Invalid input. Please only enter 1 or 2. Try again.')\r\n elif x == 2:\r\n b = True\r\n while b is True:\r\n a = input('Choose if you want to go 1st(1) or 2nd(2): ')\r\n if a == '1':\r\n x_player = HumanPlayer('X')\r\n o_player = GeniusComputerPlayer('O')\r\n b = False\r\n elif a == '2':\r\n o_player = HumanPlayer('O')\r\n x_player = GeniusComputerPlayer('X')\r\n b = False\r\n else:\r\n print('Invalid input. Please only enter 1 or 2. Try again.')\r\n elif x == 3:\r\n b = True\r\n while b is True:\r\n a = input('Choose if you want to go 1st(1) or 2nd(2): ')\r\n if a == '1':\r\n x_player = HumanPlayer('X')\r\n o_player = HumanPlayer('O')\r\n b = False\r\n elif a == '2':\r\n o_player = HumanPlayer('O')\r\n x_player = HumanPlayer('X')\r\n b = False\r\n else:\r\n print('Invalid input. Please only enter 1 or 2. Try again.')\r\n t = TicTacToe()\r\n play(t, x_player, o_player, print_game=True)\r\n","repo_name":"kb13000231/mini-projects","sub_path":"tic tac toe/TicTacToe.py","file_name":"TicTacToe.py","file_ext":"py","file_size_in_byte":5008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11278366170","text":"#Test Doubles Functions\n#import BMI_DB_Fake, BMI_DB_Mock1, BMI_DB_Mock2, BMI_DB_Stub\nimport BMI_DB\n\nBMI_DB.setup()\n\ndef BMI(height_feet, height_inches, weight):\n\n db_height = str(height_feet) + \"'\" + str(height_inches)\n db_weight = weight\n\n #Checking if DB is empty\n #if not BMI_DB_Stub.isEmpty():\n #BMI_DB_Mock2.retrieveEntries()\n #BMI_DB_Stub.retrieveEntries()\n\n if not BMI_DB.isEmpty():\n print(BMI_DB.retriveEntries())\n\n #Converting weight from pounds to kg, also checking if weight is a number\n try:\n weight = float(weight)\n weight_kg = weight * 0.45\n except (TypeError, ValueError):\n addEntry(db_height, db_weight, \"Invalid parameter: inputted weight is not a number\")\n return \"Invalid parameter: inputted weight is not a number\"\n\n #Converting height from feet and inches to meters, also checking if the height is a number\n try:\n height_inches = float(height_inches)\n height_feet = float(height_feet)\n height_inches = (12 * height_feet) + height_inches\n height_meters = height_inches * 0.025\n except (TypeError, ValueError):\n addEntry(db_height, db_weight, \"Invalid parameter: inputted height is not a number\")\n return \"Invalid parameter: inputted height is not a number\"\n\n #Checking if height and weight follow the constraints\n if height_inches > 108:\n addEntry(db_height, db_weight, \"Invalid parameter: inputted height is too big\")\n return \"Invalid parameter: inputted height is too big\"\n elif height_inches < 21:\n addEntry(db_height, db_weight, \"Invalid 
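A quick standalone check (hypothetical board state) of the diagonal logic in winner() above: only even-numbered squares can sit on one of the two diagonals 0-4-8 and 2-4-6.

board = ['X', ' ', 'O',
         ' ', 'X', 'O',
         ' ', ' ', 'X']
square, letter = 8, 'X'
diagonal1 = [board[i] for i in (0, 4, 8)]
print(square % 2 == 0 and all(s == letter for s in diagonal1))  # True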
parameter: inputted height is too small\")\n return \"Invalid parameter: inputted height is too small\"\n\n if weight > 1400:\n addEntry(db_height, db_weight, \"Invalid parameter: inputted weight is too big\")\n return \"Invalid parameter: inputted weight is too big\"\n elif weight < 4.7:\n addEntry(db_height, db_weight, \"Invalid parameter: inputted weight is too small\")\n return \"Invalid parameter: inputted weight is too small\"\n\n #doing the BMI math\n divisor = height_meters**2\n output = weight_kg/divisor\n\n #rounding the output to 2 decimal points\n output = round(output, 1)\n\n if output <= 18.5:\n output = \"Undeweight - \" + str(output)\n elif output > 18.5 and output <= 24.9:\n output = \"Normal - \" + str(output)\n elif output > 25 and output <= 29.9:\n output = \"Overweight - \" + str(output)\n else:\n output = \"Obese - \" + str(output)\n\n #BMI_DB_Mock1.addEntry(db_height, db_weight, output)\n #BMI_DB_Fake.addEntry(db_height, db_weight, output)\n addEntry(db_height, db_weight, output)\n\n return output\n\ndef addEntry(height, weight, output):\n BMI_DB.addEntry(height, weight, output)\n\ndef retrieveEntry():\n return BMI_DB.retriveEntries()\n\ndef bmi_close():\n BMI_DB.closeDB()\n","repo_name":"danilo-souza/PPA2","sub_path":"BMI.py","file_name":"BMI.py","file_ext":"py","file_size_in_byte":2890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"26485258881","text":"\"\"\"\nFunctions implementing the main command-line subcommands.\n\"\"\"\nimport csv\nimport os\nimport os.path\nimport sys\n\nimport screed\nfrom .compare import (compare_all_pairs, compare_serial_containment,\n compare_serial_max_containment)\nfrom . import MinHash\nfrom .sbtmh import load_sbt_index, create_sbt_index\nfrom . import signature as sig\nfrom . import sourmash_args\nfrom .logging import notify, error, print_results, set_quiet\nfrom .sbtmh import SearchMinHashesFindBest, SigLeaf\n\nfrom .sourmash_args import DEFAULT_LOAD_K, FileOutput, FileOutputCSV\n\nDEFAULT_N = 500\nWATERMARK_SIZE = 10000\n\nfrom .command_compute import compute\n\n\ndef compare(args):\n \"Compare multiple signature files and create a distance matrix.\"\n import numpy\n\n set_quiet(args.quiet)\n moltype = sourmash_args.calculate_moltype(args)\n\n inp_files = list(args.signatures)\n if args.from_file:\n more_files = sourmash_args.load_file_list_of_signatures(args.from_file)\n inp_files.extend(more_files)\n\n progress = sourmash_args.SignatureLoadingProgress()\n\n # load in the various signatures\n siglist = []\n ksizes = set()\n moltypes = set()\n for filename in inp_files:\n notify(\"loading '{}'\", filename, end='\\r')\n loaded = sourmash_args.load_file_as_signatures(filename,\n ksize=args.ksize,\n select_moltype=moltype,\n yield_all_files=args.force,\n progress=progress)\n loaded = list(loaded)\n if not loaded:\n notify('\\nwarning: no signatures loaded at given ksize/molecule type from {}', filename)\n siglist.extend(loaded)\n\n # track ksizes/moltypes\n for s in loaded:\n ksizes.add(s.minhash.ksize)\n moltypes.add(sourmash_args.get_moltype(s))\n\n # error out while loading if we have more than one ksize/moltype\n if len(ksizes) > 1 or len(moltypes) > 1:\n break\n\n if not siglist:\n error('no signatures found! 
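Worked example of the unit conversions in BMI() above, using the same approximate factors (0.45 kg per lb, 0.025 m per inch) on an invented 5'9", 150 lb input:

height_inches = 12 * 5 + 9               # 69 in
height_meters = height_inches * 0.025    # 1.725 m
weight_kg = 150 * 0.45                   # 67.5 kg
print(round(weight_kg / height_meters ** 2, 1))  # 22.7, i.e. Normal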
exiting.')\n sys.exit(-1)\n\n # check ksizes and type\n if len(ksizes) > 1:\n error('multiple k-mer sizes loaded; please specify one with -k.')\n ksizes = sorted(ksizes)\n error('(saw k-mer sizes {})'.format(', '.join(map(str, ksizes))))\n sys.exit(-1)\n\n if len(moltypes) > 1:\n error('multiple molecule types loaded; please specify --dna, --protein')\n sys.exit(-1)\n\n notify(' '*79, end='\\r')\n notify('loaded {} signatures total.'.format(len(siglist)))\n\n # check to make sure they're potentially compatible - either using\n # scaled, or not.\n scaled_sigs = [s.minhash.scaled for s in siglist]\n is_scaled = all(scaled_sigs)\n is_scaled_2 = any(scaled_sigs)\n\n # complain if it's not all one or the other\n if is_scaled != is_scaled_2:\n error('cannot mix scaled signatures with bounded signatures')\n sys.exit(-1)\n\n is_containment = False\n if args.containment or args.max_containment:\n is_containment = True\n\n if args.containment and args.max_containment:\n notify(\"ERROR: cannot specify both --containment and --max-containment!\")\n sys.exit(-1)\n\n # complain if --containment and not is_scaled\n if is_containment and not is_scaled:\n error('must use scaled signatures with --containment and --max-containment')\n sys.exit(-1)\n\n # notify about implicit --ignore-abundance:\n if is_containment:\n track_abundances = any(( s.minhash.track_abundance for s in siglist ))\n if track_abundances:\n notify('NOTE: --containment and --max-containment ignore signature abundances.')\n\n # if using --scaled, downsample appropriately\n printed_scaled_msg = False\n if is_scaled:\n max_scaled = max(s.minhash.scaled for s in siglist)\n for s in siglist:\n if s.minhash.scaled != max_scaled:\n if not printed_scaled_msg:\n notify('downsampling to scaled value of {}'.format(max_scaled))\n printed_scaled_msg = True\n s.minhash = s.minhash.downsample(scaled=max_scaled)\n\n if len(siglist) == 0:\n error('no signatures!')\n sys.exit(-1)\n\n notify('')\n\n # build the distance matrix\n numpy.set_printoptions(precision=3, suppress=True)\n\n # do all-by-all calculation\n\n labeltext = [str(item) for item in siglist]\n if args.containment:\n similarity = compare_serial_containment(siglist)\n elif args.max_containment:\n similarity = compare_serial_max_containment(siglist)\n else:\n similarity = compare_all_pairs(siglist, args.ignore_abundance,\n n_jobs=args.processes)\n\n if len(siglist) < 30:\n for i, E in enumerate(siglist):\n # for small matrices, pretty-print some output\n name_num = '{}-{}'.format(i, str(E))\n if len(name_num) > 20:\n name_num = name_num[:17] + '...'\n print_results('{:20s}\\t{}'.format(name_num, similarity[i, :, ],))\n\n print_results('min similarity in matrix: {:.3f}', numpy.min(similarity))\n # shall we output a matrix?\n if args.output:\n labeloutname = args.output + '.labels.txt'\n notify('saving labels to: {}', labeloutname)\n with open(labeloutname, 'w') as fp:\n fp.write(\"\\n\".join(labeltext))\n\n notify('saving comparison matrix to: {}', args.output)\n with open(args.output, 'wb') as fp:\n numpy.save(fp, similarity)\n\n # output CSV?\n if args.csv:\n with FileOutputCSV(args.csv) as csv_fp:\n w = csv.writer(csv_fp)\n w.writerow(labeltext)\n\n for i in range(len(labeltext)):\n y = []\n for j in range(len(labeltext)):\n y.append('{}'.format(similarity[i][j]))\n w.writerow(y)\n\n\ndef plot(args):\n \"Produce a clustering matrix and plot.\"\n import matplotlib as mpl\n mpl.use('Agg')\n import numpy\n import pylab\n import scipy.cluster.hierarchy as sch\n from . 
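Sketch of consuming the artifacts compare() writes when --output is given (a raw numpy matrix plus a parallel .labels.txt file); 'cmp' here stands in for whatever path was passed.

import numpy as np

with open('cmp', 'rb') as fp:
    D = np.load(fp)
labels = [line.strip() for line in open('cmp.labels.txt')]
assert D.shape == (len(labels), len(labels))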
import fig as sourmash_fig\n\n # load files\n D_filename = args.distances\n labelfilename = D_filename + '.labels.txt'\n\n notify('loading comparison matrix from {}...', D_filename)\n D = numpy.load(open(D_filename, 'rb'))\n notify('...got {} x {} matrix.', *D.shape)\n\n if args.labeltext:\n labelfilename = args.labeltext\n notify('loading labels from {}', labelfilename)\n labeltext = [ x.strip() for x in open(labelfilename) ]\n if len(labeltext) != D.shape[0]:\n error('{} labels != matrix size, exiting', len(labeltext))\n sys.exit(-1)\n\n # build filenames, decide on PDF/PNG output\n dendrogram_out = os.path.basename(D_filename) + '.dendro'\n if args.pdf:\n dendrogram_out += '.pdf'\n else:\n dendrogram_out += '.png'\n\n matrix_out = os.path.basename(D_filename) + '.matrix'\n if args.pdf:\n matrix_out += '.pdf'\n else:\n matrix_out += '.png'\n\n hist_out = os.path.basename(D_filename) + '.hist'\n if args.pdf:\n hist_out += '.pdf'\n else:\n hist_out += '.png'\n\n # output to a different directory?\n if args.output_dir:\n if not os.path.isdir(args.output_dir):\n os.mkdir(args.output_dir)\n dendrogram_out = os.path.join(args.output_dir, dendrogram_out)\n matrix_out = os.path.join(args.output_dir, matrix_out)\n hist_out = os.path.join(args.output_dir, hist_out)\n\n # make the histogram\n notify('saving histogram of matrix values => {}', hist_out)\n fig = pylab.figure(figsize=(8,5))\n pylab.hist(numpy.array(D.flat), bins=100)\n fig.savefig(hist_out)\n\n ### make the dendrogram:\n fig = pylab.figure(figsize=(8,5))\n ax1 = fig.add_axes([0.1, 0.1, 0.7, 0.8])\n ax1.set_xticks([])\n ax1.set_yticks([])\n\n # subsample?\n if args.subsample:\n numpy.random.seed(args.subsample_seed)\n\n sample_idx = list(range(len(labeltext)))\n numpy.random.shuffle(sample_idx)\n sample_idx = sample_idx[:args.subsample]\n\n np_idx = numpy.array(sample_idx)\n D = D[numpy.ix_(np_idx, np_idx)]\n labeltext = [ labeltext[idx] for idx in sample_idx ]\n\n ### do clustering\n Y = sch.linkage(D, method='single')\n sch.dendrogram(Y, orientation='right', labels=labeltext)\n fig.savefig(dendrogram_out)\n notify('wrote dendrogram to: {}', dendrogram_out)\n\n ### make the dendrogram+matrix:\n (fig, rlabels, rmat) = sourmash_fig.plot_composite_matrix(D, labeltext,\n show_labels=args.labels,\n show_indices=args.indices,\n vmin=args.vmin,\n vmax=args.vmax,\n force=args.force)\n fig.savefig(matrix_out)\n notify('wrote numpy distance matrix to: {}', matrix_out)\n\n if len(labeltext) < 30:\n # for small matrices, print out sample numbering for FYI.\n for i, name in enumerate(labeltext):\n print_results('{}\\t{}', i, name)\n\n # write out re-ordered matrix and labels\n if args.csv:\n with FileOutputCSV(args.csv) as csv_fp:\n w = csv.writer(csv_fp)\n w.writerow(rlabels)\n\n for i in range(len(rlabels)):\n y = []\n for j in range(len(rlabels)):\n y.append('{}'.format(rmat[i][j]))\n w.writerow(y)\n notify('Wrote clustered matrix and labels out to {}', args.csv)\n\n\ndef import_csv(args):\n \"Import a CSV file full of signatures/hashes.\"\n\n with open(args.mash_csvfile, newline='') as fp:\n reader = csv.reader(fp)\n siglist = []\n for row in reader:\n hashfn = row[0]\n hashseed = int(row[1])\n\n # only support a limited import type, for now ;)\n assert hashfn == 'murmur64'\n assert hashseed == 42\n\n _, _, ksize, name, hashes = row\n ksize = int(ksize)\n\n hashes = hashes.strip()\n hashes = list(map(int, hashes.split(' ' )))\n\n e = MinHash(len(hashes), ksize)\n e.add_many(hashes)\n s = sig.SourmashSignature(e, filename=name)\n 
siglist.append(s)\n notify('loaded signature: {} {}', name, s.md5sum()[:8])\n\n notify('saving {} signatures to JSON', len(siglist))\n with FileOutput(args.output, 'wt') as outfp:\n sig.save_signatures(siglist, outfp)\n\n\ndef sbt_combine(args):\n inp_files = list(args.sbts)\n notify('combining {} SBTs', len(inp_files))\n\n tree = load_sbt_index(inp_files.pop(0))\n\n for f in inp_files:\n new_tree = load_sbt_index(f)\n # TODO: check if parameters are the same for both trees!\n tree.combine(new_tree)\n\n notify('saving SBT under \"{}\".', args.sbt_name)\n tree.save(args.sbt_name)\n\n\ndef index(args):\n \"\"\"\n Build a Sequence Bloom Tree index of the given signatures.\n \"\"\"\n set_quiet(args.quiet)\n moltype = sourmash_args.calculate_moltype(args)\n\n if args.append:\n tree = load_sbt_index(args.sbt_name)\n else:\n tree = create_sbt_index(args.bf_size, n_children=args.n_children)\n\n if args.sparseness < 0 or args.sparseness > 1.0:\n error('sparseness must be in range [0.0, 1.0].')\n\n if args.scaled:\n args.scaled = int(args.scaled)\n notify('downsampling signatures to scaled={}', args.scaled)\n\n inp_files = list(args.signatures)\n if args.from_file:\n more_files = sourmash_args.load_file_list_of_signatures(args.from_file)\n inp_files.extend(more_files)\n\n if not inp_files:\n error(\"ERROR: no files to index!? Supply on command line or use --from-file\")\n sys.exit(-1)\n\n notify('loading {} files into SBT', len(inp_files))\n\n progress = sourmash_args.SignatureLoadingProgress()\n\n n = 0\n ksizes = set()\n moltypes = set()\n nums = set()\n scaleds = set()\n for f in inp_files:\n siglist = sourmash_args.load_file_as_signatures(f,\n ksize=args.ksize,\n select_moltype=moltype,\n yield_all_files=args.force,\n progress=progress)\n\n # load all matching signatures in this file\n ss = None\n for ss in siglist:\n ksizes.add(ss.minhash.ksize)\n moltypes.add(sourmash_args.get_moltype(ss))\n nums.add(ss.minhash.num)\n\n if args.scaled:\n ss.minhash = ss.minhash.downsample(scaled=args.scaled)\n scaleds.add(ss.minhash.scaled)\n\n tree.insert(ss)\n n += 1\n\n if not ss:\n continue\n\n # check to make sure we aren't loading incompatible signatures\n if len(ksizes) > 1 or len(moltypes) > 1:\n error('multiple k-mer sizes or molecule types present; fail.')\n error('specify --dna/--protein and --ksize as necessary')\n error('ksizes: {}; moltypes: {}',\n \", \".join(map(str, ksizes)), \", \".join(moltypes))\n sys.exit(-1)\n\n if nums == { 0 } and len(scaleds) == 1:\n pass # good\n elif scaleds == { 0 } and len(nums) == 1:\n pass # also good\n else:\n error('trying to build an SBT with incompatible signatures.')\n error('nums = {}; scaleds = {}', repr(nums), repr(scaleds))\n sys.exit(-1)\n\n notify('')\n\n # did we load any!?\n if n == 0:\n error('no signatures found to load into tree!? failing.')\n sys.exit(-1)\n\n notify('loaded {} sigs; saving SBT under \"{}\"', n, args.sbt_name)\n tree.save(args.sbt_name, sparseness=args.sparseness)\n\n\ndef search(args):\n from .search import search_databases\n\n set_quiet(args.quiet)\n moltype = sourmash_args.calculate_moltype(args)\n\n # set up the query.\n query = sourmash_args.load_query_signature(args.query,\n ksize=args.ksize,\n select_moltype=moltype,\n select_md5=args.md5)\n notify('loaded query: {}... 
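The index and search machinery above ultimately compares MinHash sketches. A minimal standalone sketch of that comparison with toy sequences; scaled=1 keeps every hash and is only sensible for tiny examples.

from sourmash import MinHash

mh1 = MinHash(n=0, ksize=4, scaled=1)
mh2 = MinHash(n=0, ksize=4, scaled=1)
mh1.add_sequence('ATGGCATTGACGATT')
mh2.add_sequence('ATGGCATTGACGCCC')
print(mh1.similarity(mh2))    # Jaccard estimate
print(mh1.contained_by(mh2))  # containment of mh1 in mh2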
(k={}, {})', str(query)[:30],\n query.minhash.ksize,\n sourmash_args.get_moltype(query))\n\n # downsample if requested\n if args.scaled:\n if not query.minhash.scaled:\n error('cannot downsample a signature not created with --scaled')\n sys.exit(-1)\n\n if args.scaled != query.minhash.scaled:\n notify('downsampling query from scaled={} to {}',\n query.minhash.scaled, int(args.scaled))\n query.minhash = query.minhash.downsample(scaled=args.scaled)\n\n # set up the search databases\n is_containment = args.containment or args.max_containment\n if is_containment:\n if args.containment and args.max_containment:\n notify(\"ERROR: cannot specify both --containment and --max-containment!\")\n sys.exit(-1)\n\n databases = sourmash_args.load_dbs_and_sigs(args.databases, query,\n not is_containment)\n\n # forcibly ignore abundances if query has no abundances\n if not query.minhash.track_abundance:\n args.ignore_abundance = True\n\n if not len(databases):\n error('Nothing found to search!')\n sys.exit(-1)\n\n # do the actual search\n results = search_databases(query, databases,\n threshold=args.threshold,\n do_containment=args.containment,\n do_max_containment=args.max_containment,\n best_only=args.best_only,\n ignore_abundance=args.ignore_abundance,\n unload_data=True)\n\n n_matches = len(results)\n if args.best_only:\n args.num_results = 1\n\n if not args.num_results or n_matches <= args.num_results:\n print_results('{} matches:'.format(len(results)))\n else:\n print_results('{} matches; showing first {}:',\n len(results), args.num_results)\n n_matches = args.num_results\n\n # output!\n print_results(\"similarity match\")\n print_results(\"---------- -----\")\n for sr in results[:n_matches]:\n pct = '{:.1f}%'.format(sr.similarity*100)\n name = sr.match._display_name(60)\n print_results('{:>6} {}', pct, name)\n\n if args.best_only:\n notify(\"** reporting only one match because --best-only was set\")\n\n if args.output:\n fieldnames = ['similarity', 'name', 'filename', 'md5',\n 'query_filename', 'query_name', 'query_md5']\n\n with FileOutputCSV(args.output) as fp:\n w = csv.DictWriter(fp, fieldnames=fieldnames)\n\n w.writeheader()\n for sr in results:\n d = dict(sr._asdict())\n del d['match']\n del d['query']\n w.writerow(d)\n\n # save matching signatures upon request\n if args.save_matches:\n notify('saving all matched signatures to \"{}\"', args.save_matches)\n with FileOutput(args.save_matches, 'wt') as fp:\n sig.save_signatures([ sr.match for sr in results ], fp)\n\n\ndef categorize(args):\n \"Use a database to find the best match to many signatures.\"\n set_quiet(args.quiet)\n moltype = sourmash_args.calculate_moltype(args)\n\n # eliminate names we've already categorized\n already_names = set()\n if args.load_csv:\n with open(args.load_csv, newline='') as fp:\n r = csv.reader(fp)\n for row in r:\n already_names.add(row[0])\n\n # load search database\n tree = load_sbt_index(args.sbt_name)\n\n # load query filenames\n inp_files = set(sourmash_args.traverse_find_sigs(args.queries))\n inp_files = inp_files - already_names\n\n notify('found {} files to query', len(inp_files))\n\n loader = sourmash_args.LoadSingleSignatures(inp_files,\n args.ksize, moltype)\n\n csv_w = None\n csv_fp = None\n if args.csv:\n csv_fp = open(args.csv, 'w', newline='')\n csv_w = csv.writer(csv_fp)\n\n for queryfile, query, query_moltype, query_ksize in loader:\n notify('loaded query: {}... 
(k={}, {})', str(query)[:30],\n query_ksize, query_moltype)\n\n results = []\n search_fn = SearchMinHashesFindBest().search\n\n # note, \"ignore self\" here may prevent using newer 'tree.search' fn.\n for leaf in tree.find(search_fn, query, args.threshold):\n if leaf.data.md5sum() != query.md5sum(): # ignore self.\n similarity = query.similarity(\n leaf.data, ignore_abundance=args.ignore_abundance)\n results.append((similarity, leaf.data))\n\n best_hit_sim = 0.0\n best_hit_query_name = \"\"\n if results:\n results.sort(key=lambda x: -x[0]) # reverse sort on similarity\n best_hit_sim, best_hit_query = results[0]\n notify('for {}, found: {:.2f} {}', query,\n best_hit_sim,\n best_hit_query)\n best_hit_query_name = best_hit_query.name\n else:\n notify('for {}, no match found', query)\n\n if csv_w:\n csv_w.writerow([queryfile, query, best_hit_query_name,\n best_hit_sim])\n\n if loader.skipped_ignore:\n notify('skipped/ignore: {}', loader.skipped_ignore)\n if loader.skipped_nosig:\n notify('skipped/nosig: {}', loader.skipped_nosig)\n\n if csv_fp:\n csv_fp.close()\n\n\ndef gather(args):\n from .search import gather_databases, format_bp\n\n set_quiet(args.quiet, args.debug)\n moltype = sourmash_args.calculate_moltype(args)\n\n # load the query signature & figure out all the things\n query = sourmash_args.load_query_signature(args.query,\n ksize=args.ksize,\n select_moltype=moltype,\n select_md5=args.md5)\n notify('loaded query: {}... (k={}, {})', str(query)[:30],\n query.minhash.ksize,\n sourmash_args.get_moltype(query))\n\n # verify signature was computed right.\n if not query.minhash.scaled:\n error('query signature needs to be created with --scaled')\n sys.exit(-1)\n\n # downsample if requested\n if args.scaled:\n notify('downsampling query from scaled={} to {}',\n query.minhash.scaled, int(args.scaled))\n query.minhash = query.minhash.downsample(scaled=args.scaled)\n\n # empty?\n if not len(query.minhash):\n error('no query hashes!? exiting.')\n sys.exit(-1)\n\n # set up the search databases\n cache_size = args.cache_size\n if args.cache_size == 0:\n cache_size = None\n databases = sourmash_args.load_dbs_and_sigs(args.databases, query, False,\n cache_size=cache_size)\n\n if not len(databases):\n error('Nothing found to search!')\n sys.exit(-1)\n\n found = []\n weighted_missed = 1\n new_max_hash = query.minhash._max_hash\n next_query = query\n\n for result, weighted_missed, new_max_hash, next_query in gather_databases(query, databases, args.threshold_bp, args.ignore_abundance):\n if not len(found): # first result? 
print header.\n if query.minhash.track_abundance and not args.ignore_abundance:\n print_results(\"\")\n print_results(\"overlap p_query p_match avg_abund\")\n print_results(\"--------- ------- ------- ---------\")\n else:\n print_results(\"\")\n print_results(\"overlap p_query p_match\")\n print_results(\"--------- ------- -------\")\n\n\n # print interim result & save in `found` list for later use\n pct_query = '{:.1f}%'.format(result.f_unique_weighted*100)\n pct_genome = '{:.1f}%'.format(result.f_match*100)\n name = result.match._display_name(40)\n\n if query.minhash.track_abundance and not args.ignore_abundance:\n average_abund ='{:.1f}'.format(result.average_abund)\n print_results('{:9} {:>7} {:>7} {:>9} {}',\n format_bp(result.intersect_bp), pct_query, pct_genome,\n average_abund, name)\n else:\n print_results('{:9} {:>7} {:>7} {}',\n format_bp(result.intersect_bp), pct_query, pct_genome,\n name)\n found.append(result)\n\n if args.num_results and len(found) >= args.num_results:\n break\n\n\n # basic reporting\n print_results('\\nfound {} matches total;', len(found))\n if args.num_results and len(found) == args.num_results:\n print_results('(truncated gather because --num-results={})',\n args.num_results)\n\n print_results('the recovered matches hit {:.1f}% of the query',\n (1 - weighted_missed) * 100)\n print_results('')\n\n if found and args.output:\n fieldnames = ['intersect_bp', 'f_orig_query', 'f_match',\n 'f_unique_to_query', 'f_unique_weighted',\n 'average_abund', 'median_abund', 'std_abund', 'name',\n 'filename', 'md5', 'f_match_orig', 'unique_intersect_bp',\n 'gather_result_rank', 'remaining_bp']\n\n with FileOutputCSV(args.output) as fp:\n w = csv.DictWriter(fp, fieldnames=fieldnames)\n w.writeheader()\n for result in found:\n d = dict(result._asdict())\n del d['match'] # actual signature not in CSV.\n w.writerow(d)\n\n if found and args.save_matches:\n notify('saving all matches to \"{}\"', args.save_matches)\n with FileOutput(args.save_matches, 'wt') as fp:\n sig.save_signatures([ r.match for r in found ], fp)\n\n if args.output_unassigned:\n if not len(next_query.minhash):\n notify('no unassigned hashes to save with --output-unassigned!')\n else:\n notify('saving unassigned hashes to \"{}\"', args.output_unassigned)\n\n with FileOutput(args.output_unassigned, 'wt') as fp:\n sig.save_signatures([ next_query ], fp)\n\n\ndef multigather(args):\n \"Gather many signatures against multiple databases.\"\n from .search import gather_databases, format_bp\n\n set_quiet(args.quiet)\n moltype = sourmash_args.calculate_moltype(args)\n\n if not args.db:\n error('Error! must specify at least one database with --db')\n sys.exit(-1)\n\n if not args.query and not args.query_from_file:\n error('Error! 
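Sketch of reading back the gather CSV written above with csv.DictReader; the path is whatever was passed to --output, and the column names follow the fieldnames list in the code.

import csv

with open('gather_out.csv', newline='') as fp:
    for row in csv.DictReader(fp):
        print(row['name'], row['intersect_bp'], row['f_match'])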
must specify at least one query signature with --query')\n sys.exit(-1)\n\n # flatten --db and --query\n args.db = [item for sublist in args.db for item in sublist]\n inp_files = [item for sublist in args.query for item in sublist]\n if args.query_from_file:\n more_files = sourmash_args.load_file_list_of_signatures(args.query_from_file)\n inp_files.extend(more_files)\n\n # need a query to get ksize, moltype for db loading\n query = next(iter(sourmash_args.load_file_as_signatures(inp_files[0], ksize=args.ksize, select_moltype=moltype)))\n\n databases = sourmash_args.load_dbs_and_sigs(args.db, query, False)\n\n if not len(databases):\n error('Nothing found to search!')\n sys.exit(-1)\n\n # run gather on all the queries.\n n=0\n for queryfile in inp_files:\n # load the query signature(s) & figure out all the things\n for query in sourmash_args.load_file_as_signatures(queryfile,\n ksize=args.ksize,\n select_moltype=moltype):\n notify('loaded query: {}... (k={}, {})', str(query)[:30],\n query.minhash.ksize, sourmash_args.get_moltype(query))\n\n # verify signature was computed right.\n if not query.minhash.scaled:\n error('query signature needs to be created with --scaled; skipping')\n continue\n\n # downsample if requested\n if args.scaled:\n notify('downsampling query from scaled={} to {}',\n query.minhash.scaled, int(args.scaled))\n query.minhash = query.minhash.downsample(scaled=args.scaled)\n\n # empty?\n if not len(query.minhash):\n error('no query hashes!? skipping to next..')\n continue\n\n found = []\n weighted_missed = 1\n for result, weighted_missed, new_max_hash, next_query in gather_databases(query, databases, args.threshold_bp, args.ignore_abundance):\n if not len(found): # first result? print header.\n if query.minhash.track_abundance and not args.ignore_abundance:\n print_results(\"\")\n print_results(\"overlap p_query p_match avg_abund\")\n print_results(\"--------- ------- ------- ---------\")\n else:\n print_results(\"\")\n print_results(\"overlap p_query p_match\")\n print_results(\"--------- ------- -------\")\n\n\n # print interim result & save in a list for later use\n pct_query = '{:.1f}%'.format(result.f_unique_weighted*100)\n pct_genome = '{:.1f}%'.format(result.f_match*100)\n name = result.match._display_name(40)\n\n if query.minhash.track_abundance and not args.ignore_abundance:\n average_abund ='{:.1f}'.format(result.average_abund)\n print_results('{:9} {:>7} {:>7} {:>9} {}',\n format_bp(result.intersect_bp), pct_query, pct_genome,\n average_abund, name)\n else:\n print_results('{:9} {:>7} {:>7} {}',\n format_bp(result.intersect_bp), pct_query, pct_genome,\n name)\n found.append(result)\n\n\n # basic reporting\n print_results('\\nfound {} matches total;', len(found))\n\n print_results('the recovered matches hit {:.1f}% of the query',\n (1 - weighted_missed) * 100)\n print_results('')\n\n if not found:\n notify('nothing found... 
skipping.')\n continue\n\n query_filename = query.filename\n if not query_filename:\n # use md5sum if query.filename not properly set\n query_filename = query.md5sum()\n\n output_base = os.path.basename(query_filename)\n output_csv = output_base + '.csv'\n\n fieldnames = ['intersect_bp', 'f_orig_query', 'f_match',\n 'f_unique_to_query', 'f_unique_weighted',\n 'average_abund', 'median_abund', 'std_abund', 'name',\n 'filename', 'md5', 'f_match_orig',\n 'unique_intersect_bp', 'gather_result_rank',\n 'remaining_bp']\n with FileOutputCSV(output_csv) as fp:\n w = csv.DictWriter(fp, fieldnames=fieldnames)\n w.writeheader()\n for result in found:\n d = dict(result._asdict())\n del d['match'] # actual signature not output to CSV!\n w.writerow(d)\n\n output_matches = output_base + '.matches.sig'\n with open(output_matches, 'wt') as fp:\n outname = output_matches\n notify('saving all matches to \"{}\"', outname)\n sig.save_signatures([ r.match for r in found ], fp)\n\n output_unassigned = output_base + '.unassigned.sig'\n with open(output_unassigned, 'wt') as fp:\n if not found:\n notify('nothing found - entire query signature unassigned.')\n elif not len(query.minhash):\n notify('no unassigned hashes! not saving.')\n else:\n notify('saving unassigned hashes to \"{}\"', output_unassigned)\n\n e = MinHash(ksize=query.minhash.ksize, n=0, max_hash=new_max_hash)\n e.add_many(next_query.minhash.hashes)\n sig.save_signatures([ sig.SourmashSignature(e) ], fp)\n n += 1\n\n # fini, next query!\n notify('\\nconducted gather searches on {} signatures', n)\n\n\ndef watch(args):\n \"Build a signature from raw FASTA/FASTQ coming in on stdin, search.\"\n set_quiet(args.quiet)\n\n if args.input_is_protein and args.dna:\n notify('WARNING: input is protein, turning off nucleotide hashing.')\n args.dna = False\n args.protein = True\n\n if args.dna and args.protein:\n notify('ERROR: cannot use \"watch\" with both nucleotide and protein.')\n\n if args.dna:\n moltype = 'DNA'\n is_protein = False\n dayhoff = False\n hp = False\n elif args.protein:\n moltype = 'protein'\n is_protein = True\n dayhoff = False\n hp = False\n elif args.dayhoff:\n moltype = 'dayhoff'\n is_protein = True\n dayhoff = True\n hp = False\n else:\n moltype = 'hp'\n is_protein = True\n dayhoff = False\n hp = True\n\n tree = load_sbt_index(args.sbt_name)\n\n # check ksize from the SBT we are loading\n ksize = args.ksize\n if ksize is None:\n leaf = next(iter(tree.leaves()))\n tree_mh = leaf.data.minhash\n ksize = tree_mh.ksize\n\n E = MinHash(ksize=ksize, n=args.num_hashes, is_protein=is_protein, dayhoff=dayhoff, hp=hp)\n\n notify('Computing signature for k={}, {} from stdin', ksize, moltype)\n\n def do_search():\n results = []\n streamsig = sig.SourmashSignature(E, filename='stdin', name=args.name)\n for similarity, match, _ in tree.search(streamsig,\n threshold=args.threshold,\n best_only=True,\n ignore_abundance=True,\n do_containment=False):\n results.append((similarity, match))\n\n return results\n\n notify('reading sequences from stdin')\n screed_iter = screed.open(args.inp_file)\n watermark = WATERMARK_SIZE\n\n # iterate over input records\n n = 0\n for n, record in enumerate(screed_iter):\n # at each watermark, print status & check cardinality\n if n >= watermark:\n notify('\\r... read {} sequences', n, end='')\n watermark += WATERMARK_SIZE\n\n if do_search():\n break\n\n if args.input_is_protein:\n E.add_protein(record.sequence)\n else:\n E.add_sequence(record.sequence, False)\n\n results = do_search()\n if not results:\n notify('... 
read {} sequences, no matches found.', n)\n else:\n results.sort(key=lambda x: -x[0]) # take best\n similarity, found_sig = results[0]\n print_results('FOUND: {}, at {:.3f}', found_sig,\n similarity)\n\n if args.output:\n notify('saving signature to {}', args.output)\n with FileOutput(args.output, 'wt') as fp:\n streamsig = sig.SourmashSignature(E, filename='stdin',\n name=args.name)\n sig.save_signatures([streamsig], fp)\n\n\ndef migrate(args):\n \"Migrate an SBT database to the latest version.\"\n tree = load_sbt_index(args.sbt_name, print_version_warning=False)\n\n notify('saving SBT under \"{}\".', args.sbt_name)\n tree.save(args.sbt_name, structure_only=True)\n","repo_name":"Domedriver/sourmash","sub_path":"src/sourmash/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":34467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"40489447303","text":"import socket\r\n\r\n# IP-адрес и порт, на котором Raspberry Pi будет при��имать данные\r\nHOST = 'IP_АДРЕС_RASPBERRY_PI'\r\nPORT = 12345\r\n\r\nwhile True:\r\n # socket.socket - это функция, которая создает объект сокета. Она принимает два аргумента: socket.AF_INET, который указывает на использование протокола IPv4, и socket.SOCK_STREAM, который указывает на использование протокола TCP.\r\n # Фраза with ... as ... используется для автоматического закрытия ресурсов после завершения работы с ними. В данном случае, после выполнения блока кода внутри конструкции with, сокет будет автоматически закрыт.\r\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\r\n\r\n #bind() привязывает сокет к конкретному IP-адресу и порту на устройстве. Она принимает в качестве аргументов IP-адрес и номер порта, к которому вы хотите привязать сокет.\r\n s.bind((HOST, PORT))\r\n #listen() говорит сокету начать прослушивание на входящие соединения. Он принимает один аргумент, который указывает максимальное количество ожидающих подключений в очереди.\r\n s.listen(1)\r\n print('Ожидание подключения...')\r\n\r\n conn, addr = s.accept()\r\n print('Подключено к:', addr)\r\n\r\n while True:\r\n #data = conn.recv(1024) выполняется операция получения данных из сокета conn. recv(1024) указывает, что мы пытаемся прочитать до 1024 байт из сокета conn. 
Фактически, recv() читает данные из сокета в виде последовательности байтов и сохраняет их в переменной data.\r\n data = conn.recv(1024)\r\n\r\n if not data:\r\n break\r\n\r\n print('Принято:', data.decode())\r\n\r\n response = 'Данные получены и обработаны.'\r\n\r\n #conn.sendall(response.encode()) выполняется операция отправки данных через сокет conn.\r\n #response.encode() используется для преобразования строки response в байтовую последовательность, так как сокеты работают с данными в виде байтов.\r\n conn.sendall(response.encode())\r\n\r\n conn.close()","repo_name":"PostNeoNoir/Ethernet_Connection_Client","sub_path":"Ethernet_Connection_Client.py","file_name":"Ethernet_Connection_Client.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39669166777","text":"from drawunit import drawunit\nimport random\n\nclass critters(drawunit):\n def __init__(self,x,y,width,height,classtype,coordinateslist):\n drawunit.__init__(self,x,y,width,height,classtype)\n self.coordinateslist=coordinateslist\n self.animationstate=random.randint(0,len(coordinateslist)-1)\n self.idleframes=0\n def getcoordinates(self):\n return self.coordinateslist[self.animationstate%len(self.coordinateslist)]\n def frameupdate(self):\n self.x+=1\n self.idleframes+=1\n if self.idleframes >= 15:\n self.animationstate+=1\n self.idleframes=0\n \n \n\n\n","repo_name":"Melpnn/Minigame_Test","sub_path":"critters.py","file_name":"critters.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28405302875","text":"def listoverlap(list1, list2):\n set1 = set(list1)\n set2 = set(list2)\n new_list = set1 & set2\n set_list = set(new_list)\n new_list1 = list(new_list)\n return new_list1\n\ndef main():\n return\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"CodecoolBP20172/pbwp-2nd-tw-pair-programming-exercises-team_15","sub_path":"listoverlap/listoverlap_module.py","file_name":"listoverlap_module.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5729557911","text":"import unittest\nimport vfxpaths\nfrom vfxpaths import Resolve\nfrom tests.config_data.config_data import ConfigTest\n\n\nclass TestResolve(unittest.TestCase):\n vfxpaths.maps_config_field(ConfigTest)\n\n @classmethod\n def setUpClass(cls):\n print(\"start test\")\n \n @classmethod\n def tearDownClass(cls):\n print(\"end test\")\n \n def test_get_target_path(self):\n self.resolve = Resolve(target_path=\"O:/\")\n self.assertEqual(self.resolve.get_target_path, \"O:/\")\n\n def test_map_path(self):\n resolve = Resolve(target_path=\"Z:/project/PJ_158189/shots/sq001/sh010/sq001_sh010_v002.ma\",\n use_name=\"base_test\")\n get_value = {'root': 'Z:',\n 'project': 'project',\n 'project_name': 'PJ_158189',\n 'type': 'shots',\n 'sq_name': 'sq001',\n 'shot_name': 'sh010',\n 'version': '002',\n 'ext': 'ma'}\n self.assertEqual(resolve.get_dict_data, get_value)\n\n def test_field_get(self):\n resolve = Resolve(target_path=\"Z:/project/PJ_158189/shots/sq001/sh010/sq001_sh010_v002.ma\",\n use_name=\"base_test\")\n\n self.assertEqual(len(resolve.get_fields_only_list), 8)\n\n def test_field_to_path(self):\n resolve = Resolve(use_name=\"base_test\")\n get_value = {'root': 'Z:',\n 'project': 'project',\n 'project_name': 'PJ_158189',\n 'type': 'shots',\n 'sq_name': 'sq001',\n 
'shot_name': 'sh010',\n 'version': '002',\n 'ext': 'ma'}\n current_path = resolve.dict_to_path(get_value)\n self.assertEqual(current_path, \"Z:/project/PJ_158189/shots/sq001/sh010/sq001_sh010_v002.ma\")\n\n","repo_name":"VFXToolkits/vfxpaths","sub_path":"tests/test_resolve.py","file_name":"test_resolve.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"24076299327","text":"import subprocess\nimport serial\nimport time\nimport threading\nfrom dabbleController import *\nfrom motorController import *\nfrom servoController import Servo\nfrom FPV.vedioStreamServer import *\n\n# Create a thread to run the StartStream() function\nstream_thread = threading.Thread(target=StartStream)\n\nSERVO_PIN = 40 # Board pin\n\ndef run_rfcomm_watch():\n try:\n # Run the bluetoothctl command to make the device discoverable\n subprocess.run([\"bluetoothctl\", \"discoverable\", \"on\"], check=True)\n\n # Run the command 'sudo rfcomm watch hci0'\n process = subprocess.Popen(['sudo', 'rfcomm', 'watch', 'hci0'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n \n # Print a message indicating the rfcomm watch is started\n print(\"Waiting for a device to connect...\")\n \n return process\n \n except Exception as e:\n print(\"Error:\", e)\n return None\n\ndef main():\n motorSpeed = (MAX_MOTOR_SPEED - MIN_MOTOR_SPEED)/2 + MIN_MOTOR_SPEED\n # Start the rfcomm watch process\n rfcomm_process = run_rfcomm_watch()\n streamStarted = False\n \n if rfcomm_process is not None:\n try:\n # Wait until a device is connected\n while True:\n status_output = subprocess.check_output(['sudo', 'rfcomm', '-i', 'hci0'])\n if b'rfcomm0: ' in status_output:\n break\n time.sleep(1)\n \n print(\"Device connected!\")\n\n # Open a serial connection using the rfcomm port\n ser = serial.Serial('/dev/rfcomm0', baudrate=9600, timeout=1)\n \n print(\"Motor Speed: \", motorSpeed)\n setMotorSpeed(motorSpeed)\n servo = Servo(pin=SERVO_PIN) # Replace 18 with the GPIO pin you're using\n servoAngle = 80 # Angle between 80 - 180\n\n while True:\n # Sending data\n data_to_send = \"Hello from Raspberry Pi!\\r\\n\"\n ser.write(data_to_send.encode())\n print(\"Sent:\", data_to_send)\n \n # Receiving data\n while ser.inWaiting:\n # Wait for data to be received\n received_data = ser.read(8)\n if received_data:\n if isGamePadChanged(received_data):\n print(\"Game Pad changed !!!\")\n continue\n gamePadMode = getGamePadMode(received_data)\n if gamePadMode == GamePadMode.DIGITAL :\n # Arrow Btn actions\n value = received_data[ArrowBtnByte]\n if isUpAndRightBtnPressed(value):\n moveFrontRight()\n elif isUpAndLeftBtnPressed(value):\n moveFrontLeft()\n elif isDownAndRightBtnPressed(value):\n moveBackRight()\n elif isDownAndLeftBtnPressed(value):\n moveBackLeft()\n elif isUpBtnPressed(value) :\n front() \n elif isDownBtnPressed(value) :\n back()\n elif isRightBtnPressed(value) :\n right()\n elif isLeftBtnPressed(value) :\n left()\n\n if gamePadMode == GamePadMode.JOYSTICK :\n print(\"JOYSTICK Mode\")\n # Arrow Btn actions\n value = received_data[ArrowBtnByte]\n (x_value, y_value, radians) = decodeAngleRadius(value)\n print(\"value\")\n print(int(x_value), int(y_value), int(radians))\n analogMove(int(x_value), int(y_value), int(radians))\n\n # Action btn actions\n value = received_data[ActionBtnByte]\n if isStartBtnPressed(value) and not streamStarted :\n print(\"Start btn pressed\")\n stream_thread.start()\n streamStarted = True\n\n if isSelectBtnPressed(value) :\n 
print(\"Select btn pressed\")\n \n\n if isTriangleBtnPressed(value) :\n motorSpeed = motorSpeed + 10\n if motorSpeed > MAX_MOTOR_SPEED:\n motorSpeed = MAX_MOTOR_SPEED\n print(\"Motor Speed:\", motorSpeed)\n setMotorSpeed(motorSpeed)\n elif isCrossBtnPressed(value) :\n if motorSpeed < MIN_MOTOR_SPEED:\n motorSpeed = MIN_MOTOR_SPEED \n motorSpeed = motorSpeed - 10\n print(\"Motor Speed:\", motorSpeed)\n setMotorSpeed(motorSpeed)\n\n if isCircleBtnPressed(value) :\n servo.servo_toggle()\n\n if received_data[5] == 0 and received_data[6] == 0 and received_data[7] == 0:\n stay_put() \n\n for byte in received_data:\n print(byte, end =\" \")\n print(\"\")\n \n except KeyboardInterrupt:\n # Close the serial connection\n ser.close()\n \n # Terminate the rfcomm watch process\n rfcomm_process.terminate()\n print(\"Exiting...\")\n \n except Exception as e:\n print(\"Error:\", e)\n if 'ser' in locals():\n ser.close()\n rfcomm_process.terminate()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"dhawal-19/Hackathon2K23","sub_path":"bluetoothServer.py","file_name":"bluetoothServer.py","file_ext":"py","file_size_in_byte":6116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18215768482","text":"class Solution:\n def mergeTriplets(self, triplets: List[List[int]], target: List[int]) -> bool:\n merged = [0] * len(target)\n\n for t in triplets:\n if all(a <= b for a, b in zip(t, target)):\n for i in range(3):\n merged[i] = max(merged[i], t[i])\n\n return merged == target\n","repo_name":"walkccc/LeetCode","sub_path":"solutions/1899. Merge Triplets to Form Target Triplet/1899.py","file_name":"1899.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","stars":756,"dataset":"github-code","pt":"21"} +{"seq_id":"29443330752","text":"from telethon import TelegramClient as AsyncTelethonTelegramClient\nfrom telethon.sync import TelegramClient as SyncTelethonTelegramClient\nfrom pyrogram import Client as PyrogramTelegramClient\nfrom telethon.sessions import MemorySession, SQLiteSession\nfrom pyrogram.storage import MemoryStorage, FileStorage, Storage\nfrom telethon.crypto import AuthKey\nfrom telethon.version import __version__ as telethon_version\nfrom pathlib import Path\nfrom stream_sqlite import stream_sqlite\nfrom typing import Union\nimport io\nimport nest_asyncio\nimport asyncio\nimport base64\nimport struct\nimport platform\nimport sqlite3\n\n\nclass TelegramSession:\n\n DEFAULT_DEFICE_MODEL: str = \"TGS {}\".format(platform.uname().machine)\n DEFAULT_SYSTEM_VERSION: str = platform.uname().release\n DEFAULT_APP_VERSION: str = telethon_version\n USE_NEST_ASYNCIO: bool = False\n \n def __init__(self, auth_key: bytes, dc_id, server_address, port, api_id: int, api_hash: str):\n self._auth_key = auth_key\n self._dc_id = dc_id\n self._server_address = server_address\n self._port = port\n self._api_id = api_id\n self._api_hash = api_hash\n self._loop = self.make_loop()\n\n @property\n def api_id(self):\n if self._api_id is None:\n raise ValueError(\"api_id is required for this method\")\n return self._api_id\n\n @property\n def api_hash(self):\n if self._api_hash is None:\n raise ValueError(\"api_hash is required for this method\")\n return self._api_hash\n\n @api_id.setter\n def api_id(self, value):\n self._api_id = value\n\n @api_hash.setter\n def api_hash(self, value):\n self._api_hash = value\n \n @staticmethod\n def make_loop():\n try:\n return asyncio.get_running_loop()\n except RuntimeError:\n return 
asyncio.get_event_loop()\n\n @staticmethod\n def from_sqlite_session_file_stream(\n sqlite_session: io.BytesIO, api_id: int, api_hash: str):\n \"\"\" Create object from io.BytesIO object of read telethon session file(sqlite)\n return TelegramSession object\n if sqlite_session is valid open as BytesIO telethon session\n else -> None\n \"\"\"\n if not isinstance(sqlite_session, io.BytesIO):\n raise TypeError(\n \"sqlite_session must be io.BytesIO object of open and read sqlite3 session file\")\n auth_key = None\n dc_id = None\n server_address = None\n port = None\n\n for table_name, table_info, rows in stream_sqlite(sqlite_session, max_buffer_size=1_048_576):\n if table_name != \"sessions\":\n continue\n for row in rows:\n if hasattr(\n row, \"auth_key\") and hasattr(\n row, \"dc_id\") and hasattr(row, \"server_address\") and hasattr(row, \"port\"):\n if row.auth_key is None:\n continue\n auth_key = row.auth_key\n dc_id = row.dc_id\n server_address = row.server_address\n port = row.port\n break\n if (auth_key is None) or (dc_id is None) or (server_address is None) or (port is None):\n return\n return TelegramSession(auth_key, dc_id, server_address, port, api_id, api_hash)\n\n @staticmethod\n def from_sqlite_session_file(id_or_path: Union[str, io.BytesIO], api_id: int, api_hash: str):\n sqlite_session = id_or_path\n if isinstance(id_or_path, str):\n try:\n with open(id_or_path, \"rb\") as file:\n sqlite_session = io.BytesIO(file.read())\n except FileNotFoundError as exp:\n try:\n with open(\"{}.session\".format(id_or_path), \"rb\") as file:\n sqlite_session = io.BytesIO(file.read())\n except Exception:\n raise exp\n else:\n if not isinstance(id_or_path, io.BytesIO):\n raise TypeError(\"id_or_path must be str name\")\n\n return TelegramSession.from_sqlite_session_file_stream(sqlite_session, api_id, api_hash)\n\n @staticmethod\n def from_telethon_or_pyrogram_client(\n client: Union[\n AsyncTelethonTelegramClient, SyncTelethonTelegramClient, PyrogramTelegramClient]):\n if isinstance(client, (AsyncTelethonTelegramClient, SyncTelethonTelegramClient)):\n # is Telethon\n api_hash = str(client.api_hash)\n if api_hash == str(client.api_id):\n api_hash = None\n return TelegramSession(\n client.session.auth_key.key,\n client.session.dc_id,\n client.session.server_address,\n client.session.port,\n client.api_id, api_hash\n )\n elif isinstance(client, PyrogramTelegramClient):\n pass\n else:\n raise TypeError(\"client must be or instance\")\n\n @classmethod\n def from_tdata(\n cls, path_to_folder: str, api_id: int, api_hash: str,\n device_model: str = None, system_version: str = None, app_version: str = None):\n from opentele.td import TDesktop\n from opentele.api import CreateNewSession, APIData\n tdesk = TDesktop(path_to_folder)\n api = APIData(\n api_id=api_id,\n api_hash=api_hash,\n device_model=device_model or cls.DEFAULT_DEFICE_MODEL,\n system_version=system_version or cls.DEFAULT_SYSTEM_VERSION,\n app_version=app_version or cls.DEFAULT_APP_VERSION\n )\n loop = cls.make_loop()\n if cls.USE_NEST_ASYNCIO:\n nest_asyncio.apply(self._loop)\n\n async def async_wrapper():\n client = await tdesk.ToTelethon(None, CreateNewSession, api)\n await client.connect()\n session = TelegramSession.from_telethon_or_pyrogram_client(client)\n session.api_id = api_id\n session.api_hash = api_hash\n await client.disconnect()\n return session\n\n task = loop.create_task(async_wrapper())\n session = loop.run_until_complete(task)\n return session\n\n def _make_telethon_memory_session_storage(self):\n session = 
MemorySession()\n session.set_dc(self._dc_id, self._server_address, self._port)\n session.auth_key = AuthKey(data=self._auth_key)\n return session\n\n def _make_telethon_sqlite_session_storoge(\n self, id_or_path: str = \"telethon\", update_table=False, save=False):\n session_storage = SQLiteSession(id_or_path)\n session_storage.set_dc(self._dc_id, self._server_address, self._port)\n session_storage.auth_key = AuthKey(data=self._auth_key)\n if update_table:\n session_storage._update_session_table()\n if save:\n session_storage.save()\n return session_storage\n\n def make_telethon(\n self, session=None, sync=False, **make_args) -> Union[\n AsyncTelethonTelegramClient, SyncTelethonTelegramClient]:\n \"\"\"\n Create client object with current session data\n \"\"\"\n if session is None:\n session = self._make_telethon_memory_session_storage()\n THClientMake = AsyncTelethonTelegramClient\n if sync:\n THClientMake = SyncTelethonTelegramClient\n return THClientMake(session, self.api_id, self.api_hash, **make_args)\n\n async def make_pyrogram(self, session_id: str = \"pyrogram\", **make_args):\n \"\"\"\n Create client object with current session data\n using in_memory session storoge\n \"\"\"\n th_client = self.make_telethon()\n if not th_client:\n return\n async with th_client:\n user_data = await th_client.get_me()\n\n pyrogram_string_session = base64.urlsafe_b64encode(\n struct.pack(\n Storage.SESSION_STRING_FORMAT,\n self._dc_id,\n self.api_id,\n False,\n self._auth_key,\n int(user_data.id or 999999999),\n 0\n )\n ).decode().rstrip(\"=\")\n client = PyrogramTelegramClient(\n session_id, session_string=pyrogram_string_session,\n api_id=self.api_id, api_hash=self.api_hash, **make_args)\n return client\n\n def make_sqlite_session_file(\n self, client_id: str = \"telegram\",\n workdir: str = None, pyrogram: bool = False,\n api_id: int = None, api_hash: str = None) -> bool:\n \"\"\" Make telethon sqlite3 session file\n {id.session} will be created if id_or_path is not the full path to the file\n \"\"\"\n session_workdir = Path.cwd()\n if workdir is not None:\n session_workdir = Path(workdir)\n session_path = \"{}/{}.session\".format(session_workdir, client_id)\n \n if pyrogram:\n session_workdir = Path.cwd()\n if workdir is not None:\n session_workdir = Path(workdir)\n\n # Create pyrogram session\n client = PyrogramTelegramClient(\n client_id, api_id=api_id or self.api_id, api_hash=api_hash or self.api_hash)\n client.storoge = FileStorage(client_id, session_workdir)\n client.storage.conn = sqlite3.Connection(session_path)\n client.storage.create()\n\n async def async_wrapper(client):\n user_id = 999999999\n th_client = self.make_telethon(sync=False)\n if th_client:\n async with th_client:\n user_data = await th_client.get_me()\n user_id = user_data.id\n\n await client.storage.dc_id(self._dc_id)\n await client.storage.api_id(self.api_id)\n await client.storage.test_mode(False)\n await client.storage.auth_key(self._auth_key)\n await client.storage.user_id(user_id)\n await client.storage.date(0)\n await client.storage.is_bot(False)\n await client.storage.save()\n if self.USE_NEST_ASYNCIO:\n nest_asyncio.apply(self._loop)\n self._loop.run_until_complete(async_wrapper(client))\n \n else:\n self._make_telethon_sqlite_session_storoge(session_path, update_table=True, save=True)\n return True\n\n def make_tdata_folder(self, folder_name: str = \"tdata\"):\n 
pass\n","repo_name":"omidshm/TGSessionsConverter","sub_path":"tg_converter/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"7889719377","text":"from django.urls import path\nfrom .views import index, courses_by_category, course_detail, course_content, course_content_redirect, course_add\napp_name = 'courses'\n\nurlpatterns = [\n path('', index, name='index'),\n path('category/', courses_by_category, name='courses_by_category_all'),\n path('category/',\n courses_by_category, name='courses_by_category'),\n path('course//', course_detail, name='course_detail'),\n path('course///lecture/',\n course_content_redirect, name='course_content_redirect'),\n path('course///lecture//',\n course_content, name='course_content'),\n\n\n]\n","repo_name":"BhattMukul/MusicCourses","sub_path":"courses/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27205337005","text":"import os\nfrom datetime import datetime\n\n\nfrom flask import Flask, flash, request, redirect, url_for, send_from_directory\nfrom werkzeug.utils import secure_filename\n\nserver = Flask(__name__)\nstrtimestamp = datetime.now().isoformat()\n\n# @server.route(\"/\")\ndef hello():\n return f'

<h1>{strtimestamp}</h1>
'\n\n@server.route('/', methods=['GET', 'POST'])\ndef upload_file():\n    if request.method == 'POST':\n        # check if the post request has the file part\n        # if 'file' not in request.files:\n        #     flash('No file part')\n        #     return redirect(request.url)\n        with open(\"/tmp/output_file\", \"bw\") as f:\n            chunk_size = 4096\n            while True:\n                chunk = request.stream.read(chunk_size)\n                if len(chunk) > 0:\n                    f.write(chunk)\n        # If the user does not select a file, the browser submits an\n        # empty file without a filename.\n        if file.filename == '':\n            flash('No selected file')\n            return redirect(request.url)\n        if file and file.filename:\n            filename = secure_filename(file.filename)\n            file.save(os.path.join(server.config['UPLOAD_FOLDER'], filename))\n            # return redirect(url_for('download_file', name=filename))\n            return file.filename\n    return '''\n    <!doctype html>\n    <title>Upload new File</title>\n    <h1>Upload new File</h1>\n    <form method=post enctype=multipart/form-data>\n      <input type=file name=file>\n      <input type=submit value=Upload>\n    </form>
\n '''\n\n# Download from provided URL.\n# @server.route('/')\n# def download(url):\n # req = requests.get(url, stream=True)\n # return Response(stream_with_context(req.iter_content()), content_type=req.headers['content-type'])\n\ndef upload(stream, fname):\n with open(\"/tmp/output_file\", \"bw\") as f:\n chunk_size = 4096\n while True:\n chunk = stream.read(chunk_size)\n if len(chunk) == 0:\n return\n f.write(chunk)\n\n\n@server.route('/uploads/')\ndef download_file(name):\n return send_from_directory(server.config[\"UPLOAD_FOLDER\"], name)\n\nif __name__ == \"__main__\":\n # server.config['UPLOAD_FOLDER'] = '/home/aaron/Downloads'\n server.config['UPLOAD_FOLDER'] = '/app/test_data'\n server.run(host='0.0.0.0')\n","repo_name":"ahoffer/gdal-service","sub_path":"src/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11379832797","text":"\"\"\"remove propic table and relations\n\nRevision ID: 490d5c2862fc\nRevises: 875619d22aaa\nCreate Date: 2022-05-25 13:50:32.928361\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = '490d5c2862fc'\ndown_revision = '875619d22aaa'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('profile_pic')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('profile_pic',\n sa.Column('id', mysql.INTEGER(), autoincrement=True, nullable=False),\n sa.Column('created_at', mysql.DATETIME(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=True),\n sa.Column('updated_at', mysql.DATETIME(), nullable=True),\n sa.Column('img_name', mysql.VARCHAR(length=100), nullable=False),\n sa.Column('user_id', mysql.INTEGER(), autoincrement=False, nullable=True),\n sa.ForeignKeyConstraint(['user_id'], ['users.id'], name='profile_pic_ibfk_1'),\n sa.PrimaryKeyConstraint('id'),\n mysql_collate='utf8mb4_0900_ai_ci',\n mysql_default_charset='utf8mb4',\n mysql_engine='InnoDB'\n )\n # ### end Alembic commands ###\n","repo_name":"Arif-Badhon/echamber_backend","sub_path":"src/alembic/versions/490d5c2862fc_remove_propic_table_and_relations.py","file_name":"490d5c2862fc_remove_propic_table_and_relations.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13731226443","text":"# -*- coding: utf-8 -*-\n\"\"\"\ncapture test\n\"\"\"\n\nfrom time import sleep\nfrom picamera import PiCamera\nimport sys\t\n\t\ndef main():\n\tcamera = PiCamera()\n\tPiCamera.CAPTURE_TIMEOUT = 10\t\t# IF FREEZES THEN EXITS AFTER 10 SEC?\n\tcamera.resolution = (1024,768)\n\tcamera.start_preview()\n\tsleep(2)\n\tcamera.capture('foo.jpg', use_video_port=True)\n\tsys.exit()\n\n\t\t\nif __name__ == \"__main__\":\tmain()\n","repo_name":"ANTZ314/raspi","sub_path":"picam/timelapse/Past/captureTest.py","file_name":"captureTest.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"46825535264","text":"#!/usr/bin/python\n#-*- coding: utf-8 -*-\n\nimport json\nimport copy\nimport os\nimport subprocess\n\n#标记语句\nLABEL_STATEMENT = \"line_number: \"\n#合约结果命名后缀\nINJECTED_CONTRACT_SUFFIX = 
\"_TOD.sol\"\n#标记文件命名后缀\nINJECTED_INFO_SUFFIX = \"_TODInfo.txt\"\n#结果保存路径\nDATASET_PATH = \"./dataset/\"\n#二进制运算标志\nBINARY_OPERATION_FLAG = \"BinaryOperation\"\n#布尔类型标志\nBOOL_FLAG = \"bool\"\n#插入标记字符串\nINJECTED_FLAG = \"\\t//injected TRANSACTION ORDER DEPENDENCE\"\n#存储额度关系的账本\nMAPPING_FLAG = \"mapping(address => uint256)\"\n#赋值符号\nASSIGN_FLAG = \"=\"\n#uint256标志\nUINT256_FLAG = \"uint256\"\n#相等判断符号\nEQU_FLAG = \"==\"\n#不相等判断符号\nUN_EQU_FLAG = \"!=\"\n#常量标志 \nLITERAL_FLAG = \"Literal\"\n#0常量字符串\nINT_CONST_0_FLAG = \"int_const 0\"\n#替换0的数字\nVALUE_1_STR = \"1\"\n#0的标识符\nVALUE_0_STR = \"0\"\n#标识符标志\nIDENTIFIER_FLAG = \"Identifier\"\n#布尔真值的字符串\nBOOL_TRUE_STR = \"true\"\n#mapping调用标志\nINDEX_ACCESS_FLAG = \"IndexAccess\"\n\nclass TODInjector:\n\tdef __init__(self, _contractPath, _infoPath, _astPath, _originalContractName):\n\t\tself.contractPath = _contractPath\n\t\tself.infoPath = _infoPath\n\t\tself.info = self.getInfoJson(self.infoPath)\n\t\tself.sourceCode = self.getSourceCode(self.contractPath)\n\t\tself.ast = self.getJsonAst(_astPath)\n\t\tself.preName = _originalContractName\n\t\ttry:\n\t\t\tos.mkdir(DATASET_PATH)\n\t\texcept:\n\t\t\t#print(\"The dataset folder already exists.\")\n\t\t\tpass\n\n\tdef getJsonAst(self, _astPath):\n\t\twith open(_astPath, \"r\", encoding = \"utf-8\") as f:\n\t\t\ttemp = json.loads(f.read())\n\t\treturn temp\n\n\tdef getInfoJson(self, _path):\n\t\twith open(_path, \"r\", encoding = \"utf-8\") as f:\n\t\t\ttemp = json.loads(f.read())\n\t\treturn temp\n\n\tdef getSourceCode(self, _path):\n\t\ttry:\n\t\t\twith open(_path, \"r\", encoding = \"utf-8\") as f:\n\t\t\t\treturn f.read()\n\t\texcept:\n\t\t\traise Exception(\"Failed to get source code when injecting.\")\n\t\t\treturn str()\n\n\t#该种bug的注入方式类似于整数溢出\n\t#先做个基础版出来-不考虑函数调用\n\tdef inject(self):\n\t\t#下述数据结构保存不同字符串和对应插入位置的关系\n\t\t#字典,Key插入位置,元素值-[结束位置,插入语句]\n\t\tsrcAndItsStr = dict()\n\t\t#1. 根据函数id找到函数\n\t\tapproveIdList = self.info[\"approveId\"]\n\t\t#2. 
获得外部传入的参数-传入的函数一定是外部可见性的,因此每个参数外部都可以传入 \n\t\tfor _id in approveIdList:\n\t\t\t#获得函数ast\n\t\t\tfuncAst = self.findASTNode(self.ast, \"id\", _id)[0]\t#一定能找到ast,而且只有一个\n\t\t\t#进入函数中,找uint256型参数-根据搜寻函数的限制,有且只会有一个uint256型变量\n\t\t\tuintId = self.getExternalUintPara(funcAst)\n\t\t\t#2.1 首先找函数内的赋值语句,要求赋值的这个值必须是外部传入的参数,如果没有这个语句,直接不予注入\n\t\t\tblock = self.findASTNode(funcAst, \"name\", \"Block\")[0]\t#从块中找\n\t\t\tassignId, mappingId = self.getAssignment(block, uintId)\n\t\t\tif assignId == -1:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\t#2.2 然后进入到函数块中,寻找有没有语句对这个参数进行数值上的检验-要求这个数值一定要是0 (== 0, 或者 != 0)\n\t\t\t\t#如果没有这样的语句,就可以直接标注bug了-如果有将 == 0 换成一个非零值,例如 1\n\t\t\t\t#print(uintId, assignId)\n\t\t\t\t#记录赋值语句\n\t\t\t\tassignSpos, assignEpos = self.srcToPos(self.findASTNode(self.ast, \"id\", assignId)[0][\"src\"])\n\t\t\t\twhile self.sourceCode[assignEpos] != \"\\n\":\n\t\t\t\t\tassignEpos += 1\n\t\t\t\t#停下时指向换行符号\n\t\t\t\tsrcAndItsStr[assignEpos] = [assignEpos, INJECTED_FLAG]\n\t\t\t\tfor binaryOpe in self.findASTNode(block, \"name\", \"BinaryOperation\"):\n\t\t\t\t\t#要求符号,参与变量和数字常量符合要求\n\t\t\t\t\tif binaryOpe[\"attributes\"][\"type\"] == BOOL_FLAG and (binaryOpe[\"attributes\"][\"operator\"] == EQU_FLAG or binaryOpe[\"attributes\"][\"operator\"] == UN_EQU_FLAG):\n\t\t\t\t\t\t#符号符合要求\n\t\t\t\t\t\t#要记录常量的位置\n\t\t\t\t\t\topeList = binaryOpe[\"children\"]\n\t\t\t\t\t\tif len(opeList) != 2:\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tnum1 = [ope for ope in opeList if ope[\"name\"] == LITERAL_FLAG]\n\t\t\t\t\t\t\tnum2 = [ope for ope in opeList if ope[\"name\"] == IDENTIFIER_FLAG]\n\t\t\t\t\t\t\tnum3 = [ope for ope in opeList if ope[\"name\"] == INDEX_ACCESS_FLAG]\n\t\t\t\t\t\t\tif len(num1) == 1 and len(num2) == 1:\n\t\t\t\t\t\t\t\t#这是常量和标识符\n\t\t\t\t\t\t\t\tnum1 = num1[0]\n\t\t\t\t\t\t\t\tnum2 = num2[0]\n\t\t\t\t\t\t\t\t#num1必然是常量,num2必然是标识符\n\t\t\t\t\t\t\t\tif num2[\"attributes\"][\"referencedDeclaration\"] == uintId and num1[\"attributes\"][\"value\"] == VALUE_0_STR and num1[\"attributes\"][\"type\"] == INT_CONST_0_FLAG:\n\t\t\t\t\t\t\t\t\t#此时num1是想要的常量\n\t\t\t\t\t\t\t\t\t#[bug update] 把比较的那一段直接换成真值\n\t\t\t\t\t\t\t\t\tsPos, ePos = self.srcToPos(binaryOpe[\"src\"])\n\t\t\t\t\t\t\t\t\tsrcAndItsStr[sPos] = [ePos, BOOL_TRUE_STR]\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\telif len(num1) == 1 and len(num3) == 1:\n\t\t\t\t\t\t\t\t#这是常量和账本\n\t\t\t\t\t\t\t\tnum1 = num1[0]\n\t\t\t\t\t\t\t\tnum3 = num3[0][\"children\"][0][\"children\"][0]\n\t\t\t\t\t\t\t\t#num1必然是常量,num2必然是标识符\n\t\t\t\t\t\t\t\tif num3[\"attributes\"][\"referencedDeclaration\"] == mappingId and num1[\"attributes\"][\"value\"] == VALUE_0_STR and num1[\"attributes\"][\"type\"] == INT_CONST_0_FLAG:\n\t\t\t\t\t\t\t\t\t#此时num1是想要的常量\n\t\t\t\t\t\t\t\t\t#[bug update] 把比较的那一段直接换成真值\n\t\t\t\t\t\t\t\t\tsPos, ePos = self.srcToPos(binaryOpe[\"src\"])\n\t\t\t\t\t\t\t\t\tsrcAndItsStr[sPos] = [ePos, BOOL_TRUE_STR]\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tcontinue\t\t\t\t\t\t\t\t\n\t\t\t\t\telse:\n\t\t\t\t\t\tcontinue\n\t\t'''\n\t\t#3. 扩展approveIdList-该点改动是为了应对_approve函数\n\t\tself.getCalledId(approveIdList)\n\t\tprint(approveIdList)\n\t\t'''\n\t\tif not srcAndItsStr:\n\t\t\treturn False\t#没有可以注入的语句\n\t\t#3. 然后,在self.sourceCode中插入语句\n\t\tnewSourceCode, newInjectInfo = self.insertStatement(srcAndItsStr)\n\t\t#4. 
输出并保存结果,然后产生自动标记\n\t\tself.storeFinalResult(newSourceCode, self.preName)\n\t\tself.storeLabel(newSourceCode, newInjectInfo, self.preName)\n\t\treturn True\n\n\t#返回值应该是赋值语句的id\n\tdef getAssignment(self, _blockAst, _id):\n\t\tflagId = -1\n\t\tassignId = -1\n\t\tfor assign in self.findASTNode(_blockAst, \"name\", \"Assignment\"):\n\t\t\ttry:\n\t\t\t\tif assign[\"attributes\"][\"operator\"] != ASSIGN_FLAG and assign[\"attributes\"][\"type\"] != UINT256_FLAG:\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\t#参与的几个数字\n\t\t\t\t\tnumList = assign[\"children\"]\n\t\t\t\t\t'''\n\t\t\t\t\t一般来说,我们需要的赋值语句都是直接赋值的,右边是传入参数,左边是mapping(address => uint256)\n\t\t\t\t\t而不是通过任何运算\n\t\t\t\t\t'''\n\t\t\t\t\tif len(numList) != 2:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\telse:\n\t\t\t\t\t\tnum1 = numList[0][\"children\"][0]\n\t\t\t\t\t\tnum2 = numList[1]\n\t\t\t\t\t\tif num2[\"attributes\"][\"referencedDeclaration\"] == _id and num1[\"attributes\"][\"type\"] == MAPPING_FLAG:\n\t\t\t\t\t\t\tflagId = assign[\"id\"]\n\t\t\t\t\t\t\tassignId = num1[\"children\"][0][\"attributes\"][\"referencedDeclaration\"]\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tcontinue\n\t\t\texcept:\n\t\t\t\tcontinue\n\t\treturn flagId, assignId\n\n\tdef getExternalUintPara(self, _funcAst):\n\t\t#获得参数列表-不是返回列表\n\t\tparaList = _funcAst[\"children\"][0]\t#列表类型-传入的approve函数必有uint256参数\n\t\treturn paraList[\"children\"][1][\"id\"]\n\n\tdef getCalledId(self, _list):\n\t\tcalleeIdList = list()\n\t\tfor funcId in _list:\n\t\t\tfuncAstList = self.findASTNode(self.ast, \"id\", funcId)\t#这里找到的是函数的ast\n\t\t\t#从中抽取functionCall\n\t\t\tfuncCallList = self.findASTNode(funcAstList[0], \"name\", \"FunctionCall\")\n\t\t\tif not funcCallList:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tfor funcCall in funcCallList:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tif funcCall[\"children\"][0][\"attributes\"][\"referencedDeclaration\"] > 0 and funcCall[\"children\"][0][\"attributes\"][\"value\"] != REQUIRE_FLAG and funcCall[\"children\"][0][\"attributes\"][\"value\"] != ASSERT_FLAG:\n\t\t\t\t\t\t\t#加一个判断-事件也符合我们的标准,不抽取事件\n\t\t\t\t\t\t\t_id = funcCall[\"children\"][0][\"attributes\"][\"referencedDeclaration\"]\n\t\t\t\t\t\t\t#print(_id)\n\t\t\t\t\t\t\tast = self.findASTNode(self.ast, \"id\", _id)[0]\t#只会有一个值\n\t\t\t\t\t\t\t#print(\"hahahha\")\n\t\t\t\t\t\t\tif ast[\"name\"] != \"EventDefinition\":\n\t\t\t\t\t\t\t\tcalleeIdList.append(_id) \n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\texcept:\n\t\t\t\t\t\tcontinue\n\t\t#print(calleeIdList)\n\t\t_list.extend(calleeIdList)\n\n\n\tdef storeFinalResult(self, _sourceCode, _preName):\n\t\twith open(os.path.join(DATASET_PATH, _preName + INJECTED_CONTRACT_SUFFIX), \"w+\", encoding = \"utf-8\") as f:\n\t\t\tf.write(_sourceCode)\n\t\treturn\n\t\n\tdef storeLabel(self, _sourceCode, _dict, _preName):\n\t\tstartIndex = _sourceCode.find(INJECTED_FLAG)\n\t\tlineBreak = \"\\n\"\n\t\tlabelLineList = list()\n\t\twhile startIndex != -1:\n\t\t\tnum = _sourceCode[:startIndex].count(lineBreak) + 1\n\t\t\tlabelLineList.append(LABEL_STATEMENT + str(num) + lineBreak)\n\t\t\tstartIndex = _sourceCode.find(INJECTED_FLAG, startIndex + len(INJECTED_FLAG))\n\t\twith open(os.path.join(DATASET_PATH, _preName + INJECTED_INFO_SUFFIX), \"w+\", encoding = \"utf-8\") as f:\n\t\t\tf.writelines(labelLineList)\n\t\treturn\n\n\tdef insertStatement(self, _insertInfo):\n\t\ttempCode = str()\t\n\t\ttempDict = copy.deepcopy(_insertInfo) #使用副本\n\t\tstartIndex = 0\n\t\tindexList = sorted(_insertInfo.keys())\n\t\toffset = list()\n\t\tfor index in indexList:\n\t\t\ttempCode += 
self.sourceCode[startIndex: index] + _insertInfo[index][1]\n\t\t\tstartIndex = _insertInfo[index][0]\n\t\t\toffset.append(len(_insertInfo[index][1]) + (_insertInfo[index][0] - index))\n\t\t\tnewIndex = index + sum(offset)\n\t\t\ttempDict[newIndex] = tempDict.pop(index)\n\t\ttempCode += self.sourceCode[startIndex:]\n\t\treturn tempCode, tempDict\n\t\t\n\tdef findASTNode(self, _ast, _name, _value):\n\t\tqueue = [_ast]\n\t\tresult = list()\n\t\tliteralList = list()\n\t\twhile len(queue) > 0:\n\t\t\tdata = queue.pop()\n\t\t\tfor key in data:\n\t\t\t\tif key == _name and data[key] == _value:\n\t\t\t\t\tresult.append(data)\n\t\t\t\telif type(data[key]) == dict:\n\t\t\t\t\tqueue.append(data[key])\n\t\t\t\telif type(data[key]) == list:\n\t\t\t\t\tfor item in data[key]:\n\t\t\t\t\t\tif type(item) == dict:\n\t\t\t\t\t\t\tqueue.append(item)\n\t\treturn result\n\n\t#传入:657:17:0\n\t#传出:657, 674\n\tdef srcToPos(self, _src):\n\t\ttemp = _src.split(\":\")\n\t\treturn int(temp[0]), int(temp[0]) + int(temp[1])\n\n\n\tdef output(self):\n\t\tpass","repo_name":"xf97/HuangGai","sub_path":"src/securityAbandonerAndInjector/transactionOrderDependancy/TODInjector.py","file_name":"TODInjector.py","file_ext":"py","file_size_in_byte":10132,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"21"} +{"seq_id":"40090544432","text":"class details:\n year = 2020\n\n\n def __init__(self,name,age,place):\n self.name = name\n self.age = age\n self.place = place\n\n\n def addage(self):\n self.age = self.age + 1\n\n\n def disp(self):\n print(\"----------------------------------------------------------------------------------------------------------\")\n print(\"Year\",details.year)\n print(\"Name :\", self.name)\n print(\"age\", self.age)\n print(\"place:\", self.place)\n\n\n @classmethod\n def add_year(cls):\n cls.year = cls.year+1\n\n\nname = input(\"enter a name\")\nage = int(input(\"enter age\"))\nplace = input(\"enter place\")\n\n\nx = details(name,age,place)\nx.disp()\n\ndetails.year = details.year+1\nx.addage()\nprint(\". . . .. . . . . . . . . . . . . . . . . .A F T E R 1 Y E A R. . . . . . . . . . . . . . . . . . . . . . . . . \")\nx.disp()\n\n\ndetails.add_year()\nx.addage()\nprint(\". . . .. . . . . . . . . . . . . . . . . .A F T E R 2 Y E A R. . . . . . . . . . . . . . . . . . . . . . . . . 
\")\nx.disp()\n","repo_name":"Bibin22/pythonpgms","sub_path":"Tutorials/Class Prgms/class constrctr.py","file_name":"class constrctr.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42245164489","text":"#!/usr/bin/env python\n\n\"\"\"\nlogic programming.\ntrying logpy and pyke.\n(c) Johannes Ahlmann, 2015-12-16, licensed CC0\n\"\"\"\n\nimport doctest\nfrom functools import reduce\nimport re\n\ndef check(dct, key_val):\n \"\"\" check dictionary against equal integer or missing \"\"\"\n key, val = key_val\n try:\n return dct[key] == val\n except KeyError:\n return True\n\ndef check2(dct, key_val):\n \"\"\" check dictionary against key-specific metrics or missing \"\"\"\n key, val = key_val\n try:\n if key in ['cats', 'trees']:\n return dct[key] > val\n if key in ['pomeranians', 'goldfish']:\n return dct[key] < val\n else:\n return dct[key] == val\n except KeyError:\n return True\n\ndef parse(line):\n \"\"\"\n >>> a, b = parse('Sue 1: goldfish: 9, cars: 0, samoyeds: 9')\n >>> (a, sorted(b.items()))\n (1, [('cars', 0), ('goldfish', 9), ('samoyeds', 9)])\n \"\"\"\n aunt, rest = re.match(r'Sue (\\d+): (.*)', line).groups()\n parts = [(a, int(b)) for a, b in\n [tuple(s.split(r': ')) for s in rest.split(', ')]]\n return (int(aunt), dict(parts))\n\ndef solve(aunts, mfcsam, check_func):\n \"\"\" filter aunts against all records in mfcsam \"\"\"\n def func(lst, item):\n return [(i, c) for i, c in lst if check_func(c, item)]\n return reduce(func, mfcsam.items(), aunts)\n\nMFCSAM = {'children': 3,\n 'cats': 7,\n 'samoyeds': 2,\n 'pomeranians': 3,\n 'akitas': 0,\n 'vizslas': 0,\n 'goldfish': 5,\n 'trees': 3,\n 'cars': 2,\n 'perfumes': 1\n }\n\nif __name__ == \"__main__\":\n doctest.testmod()\n\n with open('input.txt', 'r') as inp:\n AUNTS = [parse(line) for line in inp.readlines()]\n print(solve(AUNTS, MFCSAM, check))\n print(solve(AUNTS, MFCSAM, check2))\n\n","repo_name":"codinguncut/adventofcode","sub_path":"day_16/day_16.py","file_name":"day_16.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39903196774","text":"\"\"\"sample model execution\"\"\"\nimport examples.basics.model.bpmn_example_model as model_module\nfrom simpy import Environment\n\n\ndef test_simple_run():\n \"\"\"run model\"\"\"\n\n env = Environment()\n model = model_module.Model(env)\n model.source.max_entities = 10 ** 3 # increase for higher runtime\n model.source.inter_arrival_time = 0 # increase for lower runtime\n\n model.env.run()\n\n assert model.sink.overall_count_in == model.source.overall_count_in\n assert model.sink.overall_count_in == model.source.max_entities\n\n print(\"\\n\\nsimulation done.\\n\\n\")\n\n\nif __name__ == \"__main__\":\n test_simple_run()\n","repo_name":"fladdimir/casymda","sub_path":"examples/basics/model/long_run_bpmn_example_test.py","file_name":"long_run_bpmn_example_test.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"21"} +{"seq_id":"35394770142","text":"# (c)2020 TeleBot\n#\n# You may not use this plugin without proper authorship and consent from @TeleBotSupport\n#\n# Creted by @buddhhu, @itzsjdude\n#\nimport asyncio\nimport os\n\nfrom telebot import CMD_HELP\nfrom telebot.utils import admin_cmd, sudo_cmd\n\n\n@telebot.on(admin_cmd(pattern=\"repack ?(.*)\", 
outgoing=True))\n@telebot.on(sudo_cmd(pattern=\"repack ?(.*)\", allow_sudo=True))\nasync def _(event):\n a = await event.get_reply_message()\n input_str = event.pattern_match.group(1)\n b = open(input_str, \"w\")\n b.write(str(a.message))\n b.close()\n a = await event.reply(f\"**Packing into** `{input_str}`\")\n await asyncio.sleep(2)\n await a.edit(f\"**Uploading** `{input_str}`\")\n await asyncio.sleep(2)\n await event.client.send_file(event.chat_id, input_str)\n await a.delete()\n os.remove(input_str)\n\n\nCMD_HELP.update(\n {\n \"repack\": \".repack \\nUse - Pack the text and send as a file.\"\n }\n)\n","repo_name":"xditya/TeleBot","sub_path":"telebot/plugins/repack.py","file_name":"repack.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":197,"dataset":"github-code","pt":"21"} +{"seq_id":"7537775914","text":"import pandas as pd\nimport torch\nfrom sklearn import preprocessing\nfrom torch.utils.data import DataLoader\nfrom flask import Flask, request, jsonify\nfrom src.train import RecsysModel\n\nlbl_user = preprocessing.LabelEncoder()\nlbl_job = preprocessing.LabelEncoder()\nlbl_user.classes_ = pd.read_csv('./csvs/lbl_user_classes_02.csv')['classes']\nlbl_job.classes_ = pd.read_csv('./csvs/lbl_job_classes_02.csv')['classes']\nnum_users_jobs = pd.read_csv('./csvs/num_users_jobs_02.csv')\n\n\nclass Tester:\n def __init__(self, model_path):\n self.model = RecsysModel(num_users=num_users_jobs['num_users'][0], num_jobs=num_users_jobs['num_jobs'][0])\n self.model.load_state_dict(torch.load(model_path))\n self.model.eval()\n\n def test(self, user_ids):\n recommendations = []\n\n for user_id in user_ids:\n if user_id in lbl_user.classes_:\n user_idx = lbl_user.transform([user_id])[0]\n job_scores = self.predict_jobs(user_idx)\n recommendations.append({\"user_id\": user_id, \"job_scores\": job_scores})\n else:\n recommendations.append({\"user_id\": user_id, \"job_scores\": []})\n\n return recommendations\n\n def predict_jobs(self, user_idx):\n user_idx_tensor = torch.tensor([user_idx], dtype=torch.long)\n job_ids_tensor = torch.arange(num_users_jobs['num_jobs'][0], dtype=torch.long)\n\n with torch.no_grad():\n job_scores = self.model(user_idx_tensor, job_ids_tensor).squeeze().numpy()\n\n return job_scores\n\napp = Flask(__name__)\nmodel_path = './models/trained_model_02.pth' # Specify the path to your trained model\ntester = Tester(model_path)\n\n@app.route('/recommend', methods=['POST'])\ndef recommend_jobs():\n data = request.get_json()\n user_id = data.get('user_id')\n\n if user_id is None:\n return jsonify({\"error\": \"User ID is missing in the request\"}), 400\n\n user_ids = [user_id] # You can add more user IDs to this list if needed\n recommendations = tester.test(user_ids)\n\n user_recommendations = []\n for recommended_jobs in recommendations:\n user_rec = {\n \"user_id\": recommended_jobs['user_id'],\n \"job_recommendations\": []\n }\n\n job_scores = recommended_jobs['job_scores']\n sorted_jobs = sorted(\n zip(lbl_job.classes_, job_scores),\n key=lambda x: x[1],\n reverse=True\n )\n\n for job, score in sorted_jobs:\n user_rec[\"job_recommendations\"].append({\"job\": job, \"score\": score})\n\n user_recommendations.append(user_rec)\n\n return jsonify(user_recommendations)\n\nif __name__ == \"__main__\":\n 
app.run(debug=True)","repo_name":"Faysel-Abdella/AfriHub-A2SV-Hackathon-Project","sub_path":"Recomendation_AI_api/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15228466913","text":"import statistics\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import pyplot\n\nfrom database.session import Session\n\n\ndef survey_fatigue():\n \"\"\"\n Analysis to check if survey fatigue was present in the survey.\n The survey key to consider can be changed in the code below.\n\n Calculates the average survey rating for each playlist (before being put in the \"correct\" position),\n if survey fatigue did take place, the ratings should go down over time.\n :return:\n \"\"\"\n\n key = \"like_rating\"\n # key = \"selection_rating\"\n # key = \"suitable_rating\"\n\n # key = \"like_rating_specific\"\n # key = \"selection_rating_specific\"\n # key = \"suitable_rating_specific\"\n\n specific_ratings = {\n \"playlist1\": {\n \"Probability Weighted Sum\": [],\n \"Fairness\": [],\n \"Least Misery\": []\n },\n \"playlist2\": {\n \"Probability Weighted Sum\": [],\n \"Fairness\": [],\n \"Least Misery\": []\n },\n \"playlist3\": {\n \"Probability Weighted Sum\": [],\n \"Fairness\": [],\n \"Least Misery\": []\n }\n }\n\n overall_ratings = {\n \"playlist1\": [],\n \"playlist2\": [],\n \"playlist3\": []\n }\n\n for user, session in Session.get_users_with_surveys():\n survey = user.survey\n\n for playlist_string in [f\"playlist{i}\" for i in range(1, 4)]:\n voting_rule_name = survey[\"metaData\"][playlist_string][\"rule_name\"][\"ruleName\"]\n\n if \"specific\" in key:\n specific_ratings[playlist_string][voting_rule_name].extend(\n [int(x) for _, x in survey[f\"{playlist_string}_{key}\"].items()]\n )\n overall_ratings[playlist_string].extend(\n [int(x) for _, x in survey[f\"{playlist_string}_{key}\"].items()]\n )\n else:\n specific_ratings[playlist_string][voting_rule_name].append(\n int(survey[f\"{playlist_string}_{key}\"])\n )\n overall_ratings[playlist_string].append(\n int(survey[f\"{playlist_string}_{key}\"])\n )\n\n boxplot_data = [overall_ratings[\"playlist1\"], overall_ratings[\"playlist2\"], overall_ratings[\"playlist3\"]]\n boxplot_labels = [\"Playlist 1\", \"Playlist 2\", \"Playlist 3\"]\n\n fig1, ax1 = plt.subplots()\n\n pyplot.locator_params(axis='y', nbins=5)\n\n ax1.boxplot(boxplot_data, labels=boxplot_labels,\n boxprops=dict(linestyle='-', linewidth=1.5),\n medianprops=dict(linestyle='-', linewidth=2),\n whiskerprops=dict(linestyle='-', linewidth=1.5),\n capprops=dict(linestyle='-', linewidth=1.5),\n showfliers=True\n )\n\n ax1.set_ylim((0.8, 5.2))\n ax1.set_ylabel(f\"Survey Rating\")\n ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)\n\n fig1.tight_layout()\n plt.show()\n\n result = \"Specific:\\n\"\n overall_result = \"Overall:\\n\"\n\n for playlist in specific_ratings:\n\n playlist_data = specific_ratings[playlist]\n result += f\"{playlist}: \"\n overall_result += f\"{playlist}: {statistics.mean(overall_ratings[playlist]):.2f},\" \\\n f\" {statistics.stdev(overall_ratings[playlist]):.2f}, \"\n\n for voting_rule in playlist_data:\n result += f\"{voting_rule}: {statistics.mean(playlist_data[voting_rule]):.2f},\" \\\n f\" {statistics.stdev(playlist_data[voting_rule]):.2f}\" \\\n f\" (length: {len(playlist_data[voting_rule]): =3d}), \"\n result += \"\\n\"\n overall_result += \"\\n\"\n\n print(result[:-3])\n 
print(overall_result[:-3])\n","repo_name":"abansagi/GroupRecommendationThesis","sub_path":"Experiment Processing/experiment2/survey_fatigue.py","file_name":"survey_fatigue.py","file_ext":"py","file_size_in_byte":3726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24029479877","text":"from discord.ext import commands\nimport discord\nimport aiosqlite\nimport config\nimport traceback\nimport asyncio, os\nfrom datetime import datetime\nfrom cogs.utils import errors as BotErrors\n\ndef get_pre(bot, message):\n return '' if message.author.id == bot.owner_id else '~'\n\ndef validate_config():\n try: # Kinda weird way of checking, but it works\n config.TOKEN = config.TOKEN\n config.LOG_CHANNEL = config.LOG_CHANNEL\n config.OWNER_ID = config.OWNER_ID\n config.EMBEDS = config.EMBEDS\n config.EMBED_COLOR = config.EMBED_COLOR\n config.COGS = config.COGS\n except:\n raise BotErrors.InvalidConfig(\"Invalid configuration file, please use the config example in the README of the repository\")\n print(\"Config is valid\")\n\nclass BotWatch(commands.Bot):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n \n async def _register_bot(self, botid: int):\n await self.db.execute(\"INSERT INTO bots VALUES (?, ?)\", (botid, datetime.utcnow()))\n self.watcher_cache.append(botid)\n await self.db.commit()\n\nbot = BotWatch(command_prefix=get_pre, intents=discord.Intents.all())\n\nasync def launch(bot):\n bot.db = await aiosqlite.connect('bw.db')\n await bot.db.execute(\"CREATE TABLE IF NOT EXISTS bots (id int, watchingsince blob)\")\n\n bot.LOG_CHANNEL = config.LOG_CHANNEL\n bot.server_whitelist = [724456699280359425] #replace ID with servers you want allowed\n\n cur = await bot.db.execute(\"SELECT id FROM bots\")\n bot.watcher_cache = [bots[0] for bots in await cur.fetchall()]\n\n validate_config()\n\n\nasyncio.run(launch(bot))\n\nos.environ[\"JISHAKU_NO_UNDERSCORE\"] = \"True\"\nos.environ[\"JISHAKU_NO_DM_TRACEBACK\"] = \"True\" \nos.environ[\"JISHAKU_HIDE\"] = \"True\"\nfor c in config.COGS:\n try:\n bot.load_extension(c)\n print(f\"loaded cog: {c}\")\n except Exception:\n print(traceback.format_exc())\n\n@bot.event\nasync def on_ready():\n print(f\"{str(bot.user)} is connected\")\n print('-' * 20)\n\nbot.run(config.TOKEN)","repo_name":"averwhy/BotWatch","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"75105445173","text":"# 62. 
二叉搜索树的第k个结点\n# 给定一棵二叉搜索树,请找出其中的第k小的结点。例如, (5,3,7,2,4,6,8) 中,按结点数值大小顺序第三小结点的值为4。\n\n\n# -*- coding:utf-8 -*-\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\nclass Solution:\n # 返回对应节点TreeNode\n def KthNode(self, pRoot, k):\n # write code here\n res = self.BSTtoNodeList(pRoot)\n if(k>0 and k<=len(res)):\n return res[k-1]\n return None\n # 方法一 中序遍历 递归\n def BSTtoNodeList(self, pRoot):\n if(pRoot == None):\n return []\n if(pRoot.left == None and pRoot.right == None):\n return [pRoot]\n left = self.BSTtoNodeList(pRoot.left)\n right = self.BSTtoNodeList(pRoot.right)\n res = left+[pRoot]+right\n return res\n # 方法二 栈\n def BSTtoNodeList(self, pRoot):\n res = []\n stack = []\n nowNode = pRoot\n while(nowNode or len(stack)>0):\n while(nowNode):\n stack.append(nowNode)\n nowNode = nowNode.left\n nowNode = stack.pop()\n res.append(nowNode)\n nowNode = nowNode.right\n return res ","repo_name":"dingchaofan/AlgorithmSolution","sub_path":"SwordToOffer/SwordToOffer-PythonSolution/62_KthNode.py","file_name":"62_KthNode.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"8889358256","text":"#*************************************************************************************************************\n# ________ ________ ___ ___ ________ ___ ___ ________ ________ ________ ________ ___\n# |\\___ ___\\\\ __ \\|\\ \\|\\ \\|\\ ____\\|\\ \\|\\ \\|\\ ____\\|\\ __ \\|\\ __ \\|\\ __ \\|\\ \\\n# \\|___ \\ \\_\\ \\ \\|\\ \\ \\ \\\\\\ \\ \\ \\___|\\ \\ \\\\\\ \\ \\ \\___|\\ \\ \\|\\ \\ \\ \\|\\ \\ \\ \\|\\ \\ \\ \\\n# \\ \\ \\ \\ \\ \\\\\\ \\ \\ \\\\\\ \\ \\ \\ \\ \\ __ \\ \\ \\ \\ \\ __ \\ \\ _ _\\ \\ ____\\ \\ \\\n# \\ \\ \\ \\ \\ \\\\\\ \\ \\ \\\\\\ \\ \\ \\____\\ \\ \\ \\ \\ \\ \\____\\ \\ \\ \\ \\ \\ \\\\ \\\\ \\ \\___|\\ \\ \\\n# \\ \\__\\ \\ \\_______\\ \\_______\\ \\_______\\ \\__\\ \\__\\ \\_______\\ \\__\\ \\__\\ \\__\\\\ _\\\\ \\__\\ \\ \\__\\\n# \\|__| \\|_______|\\|_______|\\|_______|\\|__|\\|__|\\|_______|\\|__|\\|__|\\|__|\\|__|\\|__| \\|__|\n#\n# *************************************************************************************************************\n# Author: Rafael Fernández Flores (@Plata17 at GitHub)\n# Class name: RAM_DB.py\n# Description: This class creates some list and structures with info for the application. The DB class is\n# a singleton.\n# *************************************************************************************************************\n\nimport os\nimport platform\n\nfrom .MetaDataVLC import MetaDataVLC\nfrom lxml import etree\nfrom operator import itemgetter\n\n\nclass RAM_DB:\n \"\"\"\n This class creates some list and structures with info for the application. 
The DB class is\n a singleton.\n \"\"\"\n\n #Singleton pattern\n class __RAM_DB:\n def __init__(self):\n \"\"\"\n Constructor of the class.\n \"\"\"\n\n # List all the files in the desired format (MP3, WAV...)\n self.filesInFolder = []\n # List with the full path to all the files, for get the meta data\n self.pathFiles = []\n # Selected song of the list (the number is the index)\n self.selectionIndex = 0\n\n if platform.system() == \"Windows\":\n musicDir = str(os.path.expanduser(\"~\\Music\"))\n elif platform.system() == \"Linux\":\n musicDir = str(os.path.expanduser(\"~/Music\"))\n else:\n musicDir = \"Music\"\n\n for (dirpath, dirnames, filenames) in os.walk(musicDir):\n for x in filenames:\n if x.endswith(\".mp3\"):\n #or x.endswith(\".wav\")\n self.filesInFolder.append(x)\n self.pathFiles.append(os.path.join(dirpath, x))\n\n metaDataVLC = MetaDataVLC(self.pathFiles)\n self.metaDataList = metaDataVLC.getMetaData\n\n self.__sortAudioDBByNames()\n\n self.currentMenu = \"MainMenu\"\n\n\n def __sortAudioDBByNames(self):\n \"\"\"\n Sort all the songs alphabetically by the name of the songs\n \"\"\"\n\n metaDataNames= []\n\n for i in range(0, len(self.metaDataList)):\n metaDataNames.append(self.metaDataList[i][0])\n\n metaDataNames, self.pathFiles, self.filesInFolder = zip(*sorted(zip(metaDataNames, self.pathFiles, self.filesInFolder)))\n self.metaDataList = sorted(self.metaDataList, key=itemgetter(0))\n\n\n def getArtworkNotFoundPath(self):\n \"\"\"\n Returns a string with the path of the artwork not found image.\n :return: String with the path.\n \"\"\"\n\n return \"themes/default/img/artworkNotFound.png\"\n\n\n def getAudioDB(self):\n \"\"\"\n Returns the AudioDB\n\n :return: Three lists of strings with all the data:\n \"\"\"\n\n return (self.filesInFolder, self.pathFiles, self.metaDataList)\n\n def setSelection(self, selectionIndex):\n \"\"\"\n Sets the current song's index.\n\n :param selectionIndex: Index of the song in the list\n \"\"\"\n\n self.selectionIndex = selectionIndex\n\n def getSelection(self):\n \"\"\"\n Returns the index of the current song.\n\n :return: Index of the current song.\n \"\"\"\n\n return self.selectionIndex\n\n def getIndexByPath(self, pathFile):\n \"\"\"\n Returns the index of the song in the list of songs by the path of the file.\n\n :param pathFile: Full path of the song.\n :return: Index of the song.\n \"\"\"\n\n return self.pathFiles.index(pathFile)\n\n def getIndexByFile(self, fileInFolder):\n \"\"\"\n Returns the index of the song in the list of songs by the file name.\n\n :param fileInFolder: File name of the song.\n :return: Index of the song.\n \"\"\"\n\n return self.filesInFolder.index(fileInFolder)\n\n def getCurrentMenu(self):\n \"\"\"\n Returns the current menu that the user is viewing.\n\n :return: String whith the name of the current menu.\n \"\"\"\n\n return self.currentMenu\n\n def setCurrentMenu(self, menu):\n \"\"\"\n Sets the current menu that the user is viewing.\n\n :param menu: String with the name of the menu.\n \"\"\"\n\n self.currentMenu = menu\n\n def getRadioChannels(self):\n \"\"\"\n Returns a list of tuples with the name and frequency of the memorized channels of radio.\n\n :return: List with a tuple (frequency, name)\n \"\"\"\n\n # Pre-allocating the size of the list, we have 9 items, one per memory button\n result = [None]*9\n\n # Opening the XML\n xmlFile = etree.parse(\"config/RadioChannels.xml\").getroot()\n\n\n for channelItem in xmlFile.findall('channel'):\n result[int(channelItem.get(\"id\"))] = 
((float(channelItem.get('freq')), channelItem.text))\n\n return result\n\n def setRadioChannel(self, id, freq, name):\n \"\"\"\n Sets the name and frequency into the memory bank of the radio menu.\n\n :param id: Id of the bank.\n :param freq: Frequency of the new channel.\n :param name: Name of the new channel.\n \"\"\"\n\n xmlFile = etree.parse(\"config/RadioChannels.xml\").getroot()\n\n for channelItem in xmlFile.findall('channel'):\n if(int(channelItem.get(\"id\")) == id):\n channelItem.set(\"freq\", str(freq))\n channelItem.text = name\n\n obj_xml = etree.tostring(xmlFile,\n pretty_print=True,\n xml_declaration=True)\n\n try:\n with open(\"config/RadioChannels.xml\", \"wb\") as xml_writer:\n xml_writer.write(obj_xml)\n except IOError:\n print(\"IOError: Error trying to write into the XML file.\")\n\n\n\n def __str__(self):\n return repr(self) + self.val\n\n instance = None\n\n def __init__(self):\n if not RAM_DB.instance:\n RAM_DB.instance = RAM_DB.__RAM_DB()\n\n def __getattr__(self, name):\n return getattr(self.instance, name)","repo_name":"rfernandezf/TouchCarPI","sub_path":"touchcarpi/main/DB/RAM_DB.py","file_name":"RAM_DB.py","file_ext":"py","file_size_in_byte":7212,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"2910416301","text":"import commands2\nimport ctre\nimport wpilib\nimport wpimath.controller\nfrom commands2 import SubsystemBase\n\nfrom misc_constants.arm_constants import ArmConstants\n\n\nclass ArmSubsystem(SubsystemBase):\n # Create a new ArmSubsystem\n\n def __init__(self) -> None:\n super().__init__()\n\n self.arm_motor = wpilib.Spark(0)\n self.arm_motor.setInverted(True)\n\n self.arm_encoder = wpilib.DutyCycleEncoder(channel=0)\n self.arm_encoder.setPositionOffset(ArmConstants.kArmOffset)\n self.arm_encoder.setDutyCycleRange(0, 1)\n\n def periodic(self) -> None:\n wpilib.SmartDashboard.putNumber(\"Arm Encoder Value\", self.get_position())\n\n def get_position(self):\n return self.arm_encoder.getAbsolutePosition()\n\n def set_motor_speed(self, speed):\n self.arm_motor.set(speed)\n","repo_name":"frc4531/2023-swerve-code","sub_path":"subsystems/arm_subsystem.py","file_name":"arm_subsystem.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41760703641","text":"def soma(lista):\n total = 0\n for x in lista:\n total += x\n\n return total\n\n\ndef soma_recursiva(lista):\n if not lista:\n return 0 # caso base\n else:\n total = lista[0] + soma_recursiva(lista[1:]) # caso recursivo\n return total\n\n\nprint(\"Soma comum: \", soma([1, 2, 3, 4]))\nprint(\"Soma usando recursão: \", soma_recursiva([1, 2, 3, 4]))\n","repo_name":"jessicamosouza/book-grokking-algorithms","sub_path":"cap-4-quicksort/soma.py","file_name":"soma.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"32752299927","text":"import os\nimport time\nimport torch\nfrom torch import optim\nimport torch.nn as nn\n\nfrom portuguese_ner_biomedical.dataset import Data, DataBERT\nfrom portuguese_ner_biomedical.model import CRF, LinearLayerCRF, BERTSlotFilling\nfrom portuguese_ner_biomedical.evaluation import Evaluation\nfrom portuguese_ner_biomedical.trainer import Trainer\n\n# General constants\nNUM_EXPERIMENTS = 1\nOUTPUT_PATH = 'output_files/'\nDATA_PATH = 'data_example/'\n\n# Linear Layer + CRF constants\nNUM_EPOCHS = 1\nBATCH = 1\n\n# BERT 
constants\nHIDDEN_DIM = 1024\n\n\ndef main():\n time_str = time.strftime(\"%Y_%m_%d-%H:%M:%S\")\n\n ################################################\n # CRF\n ################################################\n crf_output_folder = OUTPUT_PATH + ('crf_%s' % time_str) + '/'\n if not os.path.exists(crf_output_folder):\n os.makedirs(crf_output_folder)\n data_info = Data(DATA_PATH)\n crf = CRF()\n evaluation = Evaluation(crf_output_folder)\n\n print(\"Evaluating CRF:\")\n micro_avg_f1 = 0.0\n y_true = y_pred = test_tokens = []\n for num_experiment in range(NUM_EXPERIMENTS):\n x_train, y_train, x_test, y_true, test_tokens = crf.get_train_test_data(data_info)\n crf.fit(x_train, y_train)\n y_pred = crf.predict(x_test)\n micro_avg_f1 += evaluation.evaluate(num_experiment, y_true, y_pred)\n micro_avg_f1 /= NUM_EXPERIMENTS\n print()\n print('\\tMicro average F1: %.2f' % micro_avg_f1)\n\n evaluation.generate_output_csv('crf_output', y_true, y_pred, test_tokens)\n\n ################################################\n # Linear Layer + CRF\n ################################################\n linear_layer_crf_output_folder = OUTPUT_PATH + ('linear_layer_crf_%s' % time_str) + '/'\n if not os.path.exists(linear_layer_crf_output_folder):\n os.makedirs(linear_layer_crf_output_folder)\n\n data_info = Data(DATA_PATH)\n train_data, test_data = data_info.fit()\n vocab_size = len(data_info.vocab_in)\n num_classes = len(data_info.vocab_out)\n\n model = LinearLayerCRF(num_classes, vocab_size, data_info.out_w2id)\n optimizer = optim.SGD(model.parameters(), lr=0.01, weight_decay=1e-4)\n\n trainer = Trainer(model, BATCH, is_bert=False)\n evaluation = Evaluation(linear_layer_crf_output_folder)\n\n micro_avg_f1 = 0.0\n y_true_text = y_pred_text = test_tokens = []\n for num_experiment in range(NUM_EXPERIMENTS):\n y_true, y_pred = trainer.test(test_data)\n for epoch in range(1, NUM_EPOCHS + 1):\n trainer.train(train_data, optimizer, epoch)\n y_true, y_pred = trainer.test(test_data)\n\n # get test tokens and convert output from number to text\n test_tokens = [info[-1] for info in test_data]\n y_true_text = evaluation.convert_output_to_text(y_true, data_info.out_id2w)\n y_pred_text = evaluation.convert_output_to_text(y_pred, data_info.out_id2w)\n\n micro_avg_f1 += evaluation.evaluate(num_experiment, y_true_text, y_pred_text)\n\n micro_avg_f1 /= NUM_EXPERIMENTS\n print()\n print('Micro avg F1: %.2f' % micro_avg_f1)\n evaluation.generate_output_csv('linear_layer_output', y_true_text, y_pred_text, test_tokens)\n\n ################################################\n # BERT\n ################################################\n bert_output_folder = OUTPUT_PATH + ('bert_%s' % time_str) + '/'\n if not os.path.exists(bert_output_folder):\n os.makedirs(bert_output_folder)\n\n data_info = DataBERT(DATA_PATH)\n train_data, test_data = data_info.fit()\n\n # device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n device = torch.device('cpu')\n num_classes = len(data_info.vocab_out)\n model = BERTSlotFilling(HIDDEN_DIM, num_classes)\n model.to(device)\n\n no_decay = ['bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n 'weight_decay': 0.01},\n {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n optimizer = optim.Adam(optimizer_grouped_parameters, lr=1e-5)\n weights = [1.] 
* num_classes\n weights[data_info.out_w2id['O']] = 0.01\n weights = torch.tensor(weights).to(device)\n criterion = nn.CrossEntropyLoss(weight=weights)\n\n evaluation = Evaluation(bert_output_folder)\n trainer = Trainer(model, BATCH, is_bert=True, criterion=criterion)\n\n micro_avg_f1 = 0.0\n y_true_text = y_pred_text = test_tokens = []\n for num_experiment in range(NUM_EXPERIMENTS):\n y_true, y_pred = trainer.test(test_data)\n for epoch in range(1, NUM_EPOCHS + 1):\n trainer.train(train_data, optimizer, epoch)\n y_true, y_pred = trainer.test(test_data)\n\n # get test tokens and convert output from number to text\n test_tokens = [info[-1] for info in test_data]\n y_true_text = evaluation.convert_output_to_text(y_true, data_info.out_id2w)\n y_pred_text = evaluation.convert_output_to_text(y_pred, data_info.out_id2w)\n\n micro_avg_f1 += evaluation.evaluate(num_experiment, y_true_text, y_pred_text)\n\n micro_avg_f1 /= NUM_EXPERIMENTS\n print()\n print('Micro avg F1: %.2f' % micro_avg_f1)\n evaluation.generate_output_csv('bert_output', y_true_text, y_pred_text, test_tokens)\n\n\nif __name__ == '__main__':\n main()","repo_name":"pavalucas/Bete","sub_path":"src/ner/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5407,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"36642957212","text":"from math import exp\nfrom typing import Callable\n\nimport noise\n\nElevationFn = Callable[[int, int], float]\n\n\ndef flat() -> ElevationFn:\n \"\"\"\n Create a callable that returns 0 for all elevations.\n\n Returns:\n A callable that computes z values for (x, y) inputs\n \"\"\"\n\n def fn(x: int, y: int) -> float:\n \"\"\"\n Return a constant, flat elevation value at every x and y point\n\n Arguments:\n x: The input x location (isn't used).\n y: The input y location (isn't used).\n\n Returns:\n The constant, flat elevation of 0.\n \"\"\"\n return 0\n\n return fn\n\n\ndef gaussian(\n amplitude: float, mu_x: float, mu_y: float, sigma_x: float, sigma_y: float\n) -> ElevationFn:\n \"\"\"\n Create a callable that returns the value of a Gaussian centered at (mu_x, mu_y) with\n variances given by sigma_x and sigma_y. The input A will modify the final amplitude.\n\n Arguments:\n amplitude: The Gaussian amplitude\n mu_x: The mean/center in the x direction\n mu_y: The mean/center in the y direction\n sigma_x: The variance in the x direction\n sigma_y: The variance in the y direction\n\n Returns:\n A callabe that computes z values for (x, y) inputs\n \"\"\"\n\n def fn(x: int, y: int) -> float:\n \"\"\"\n Return the gaussian function value at the specified point.\n\n Arguments:\n x: the input x coordinate\n y: the input y coordinate\n\n Returns:\n The output z coordinate computed by the function\n \"\"\"\n\n exp_term = ((x - mu_x) ** 2 / (4 * sigma_x**2)) + (\n (y - mu_y) ** 2 / (4 * sigma_y**2)\n )\n z = amplitude * exp(-exp_term)\n return z\n\n return fn\n\n\ndef perlin(\n octaves: int,\n persistence: float,\n lacunarity: float,\n seed: int,\n range_min: float,\n range_max: float,\n) -> ElevationFn:\n \"\"\"\n Create a callable that returns the value of a 2D Perlin noise function.\n\n Arguments:\n octaves: specifies the number of passes, defaults to 1 (simple noise).\n persistence: specifies the amplitude of each successive octave relative\n to the one below it. Defaults to 0.5 (each higher octave's amplitude\n is halved). 
Note the amplitude of the first pass is always 1.0.\n lacunarity: specifies the frequency of each successive octave relative\n to the one below it, similar to persistence. Defaults to 2.0.\n seed: The seed to used to generate random terrain. `seed` takes the place of the\n `base` argument in the `snoise2()` function, which adds offsets to the\n input (x, y) coordinates to get new terrain\n range_min: The minimum amplitude to scale to\n range_max: The maximum amplitude to scale to\n\n Returns:\n A callable that computes Perlin Noise z-values for (x, y) inputs\n \"\"\"\n if range_min >= range_max:\n raise ValueError(f\"range_min={range_min} must be less than range_max={range_max}\")\n\n def fn(x: int, y: int) -> float:\n \"\"\"\n Return the generated Perlin Noise function at the specified value.\n\n Arguments:\n x: the input x coordinate\n y: the input y coordinate\n\n Returns:\n The output z coordinate computed by the function\n \"\"\"\n z = noise.snoise2(x, y, octaves, persistence, lacunarity, base=seed)\n # Normalize to [0, 1]\n z = (z + 1) / 2\n # Scale to [0, range_max-range_min]\n z = z * (range_max - range_min)\n # Add to normalize to [range_min, range_max]\n z = z + range_min\n return z\n\n return fn\n","repo_name":"mitrefireline/simfire","sub_path":"simfire/world/elevation_functions.py","file_name":"elevation_functions.py","file_ext":"py","file_size_in_byte":3703,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"37618351213","text":"\"\"\"Serializer Turno\"\"\"\nfrom rest_framework import serializers\n\nfrom htch.peliculas.models import Turno\n\n\nclass TurnoModelSerializer(serializers.ModelSerializer):\n class Meta:\n model = Turno\n fields = (\n 'id_turno', 'turno_fech', 'turno_hora',\n 'turno_estado', 'pelicula'\n )\n\n","repo_name":"tigrinustrade/humantech_prueba","sub_path":"htch/peliculas/serializers/turno.py","file_name":"turno.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74991438131","text":"# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n\nimport os\nfrom senf import fsnative\n\nimport quodlibet.config\nfrom quodlibet.formats import AudioFile\nfrom quodlibet.formats.mp3 import MP3File\nfrom quodlibet.library import SongLibrary\nfrom quodlibet.qltk.lyrics import LyricsPane\nfrom tests import TestCase, init_fake_app, destroy_fake_app, get_data_path\nfrom tests.helper import get_temp_copy\n\nLYRICS = \"foobär...\\nMore cowbell!©\"\n\n\ndef AF(*args, **kwargs):\n a = AudioFile(*args, **kwargs)\n a.sanitize()\n return a\n\n\nclass TLyricsPane(TestCase):\n\n def setUp(self):\n quodlibet.config.init()\n init_fake_app()\n self.pane = None\n self.library = SongLibrary()\n\n def tearDown(self):\n destroy_fake_app()\n self.library.destroy()\n quodlibet.config.quit()\n if self.pane:\n self.pane.destroy()\n\n def test_construction(self):\n af = AF({\"~filename\": fsnative(u\"/dev/null\")})\n self.pane = LyricsPane(af)\n\n def test_save_lyrics(self):\n af = self.temp_mp3()\n self.pane = LyricsPane(af)\n self.pane._save_lyrics(af, LYRICS)\n self.failUnlessEqual(af(\"~lyrics\"), LYRICS)\n\n def test_save_encoded_lyrics(self):\n af = self.temp_mp3()\n self.pane = LyricsPane(af)\n self.pane._save_lyrics(af, LYRICS)\n 
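# added note (not in the upstream quodlibet test): LYRICS above contains non-ASCII\n        # characters, so this assertion also exercises the UTF-8 round trip through the saved tag.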
self.failUnlessEqual(af(\"~lyrics\"), LYRICS)\n\n def test_save_lyrics_deletes_lyric_file(self):\n af = self.temp_mp3()\n lf_name = af.lyric_filename\n os.makedirs(os.path.dirname(lf_name))\n with open(lf_name, \"wb\") as f:\n f.write(LYRICS.encode(\"utf-8\"))\n self.failUnless(os.path.exists(lf_name))\n self.pane = LyricsPane(af)\n self.pane._save_lyrics(af, LYRICS)\n self.failIf(os.path.exists(lf_name))\n\n def temp_mp3(self):\n name = get_temp_copy(get_data_path(\"silence-44-s.mp3\"))\n af = MP3File(name)\n af.sanitize()\n return af\n","repo_name":"quodlibet/quodlibet","sub_path":"tests/test_qltk_lyrics.py","file_name":"test_qltk_lyrics.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","stars":1306,"dataset":"github-code","pt":"21"} +{"seq_id":"33062537623","text":"import os\n\nfiles_key = []\nfiles_values = []\n\nwith os.scandir(r'C:\\Users\\sashf\\PycharmProjects\\pythonProject7.3\\Home7_3') as entries:\n for entry in entries:\n files_key.append(entry.name)\n\ndef files_dict(files_key):\n\n for file in files_key:\n # print(file)\n file = open(file, encoding='utf-8')\n files_values.append(file.readlines())\n global dict_files\n dict_files = dict(zip(files_key, files_values))\n file.close()\n return dict_files\nfiles_dict(files_key)\n\nsorted_dict = {}\ndef files_write(dict_files):\n sorted_dict = ({k:v for k, v in sorted(dict_files.items(), key=lambda x: len(x[1]))})\n file = open('file_overall.txt', 'w', encoding='utf-8')\n for k,v in sorted_dict.items():\n file.write(k + '\\n')\n file.write(' ')\n file.write(str(len(v)) + '\\n')\n file.writelines(v)\n file.close()\n\nfiles_write(dict_files)\n\n\n\n\ndef file_path(file_name: str) -> str:\n for root, dirnames, filenames in os.walk('.'):\n for file in filenames:\n if file == file_name:\n print(file)\n print(filenames)\n return os.path.join(root, file)\n\nfile_path(files_key)","repo_name":"anfilippov7/Homework-7","sub_path":"Home work #7.3.py","file_name":"Home work #7.3.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20567274940","text":"import os\nfrom torch import nn\n\ndef init_weights(modules):\n for module in modules:\n if isinstance(module, (nn.Conv2d, nn.ConvTranspose2d)):\n nn.init.kaiming_normal_(module.weight.data)\n if module.bias is not None:\n nn.init.constant_(module.bias, 0)\n elif isinstance(module, (nn.BatchNorm1d, nn.BatchNorm2d)):\n nn.init.constant_(module.weight, 1)\n nn.init.constant_(module.bias, 0)\n elif isinstance(module, nn.Linear):\n nn.init.normal_(module.weight, 0, 0.01)\n nn.init.constant_(module.weight, 0)\n\ndef ensuredir(dirname):\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\ndef logging(msg, *args, out=True):\n dir = './logs'\n ensuredir(dir)\n if out:\n print(msg)\n for file in args:\n f = open(dir + '/' + file, 'a')\n f.write(msg + '\\n')\n f.flush()\n","repo_name":"shinobu-x/fully_rotatation_invariant_convolutional_attention_mlp","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32462495288","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Spaghetti: Web Application Security Scanner\n#\n# @url: https://github.com/m4ll0k/Spaghetti\n# @author: Momo Outaadi (M4ll0k)\n# @license: See the file 'doc/LICENSE'\n\nimport re\n\nclass Sitelock:\n @staticmethod\n def run(content):\n _ = 
False\n _ = re.search(r'SiteLock Incident ID',content,re.I) is not None\n if _:\n \treturn \"TrueShield Web Application Firewall (SiteLock)\"","repo_name":"ryanmrestivo/red-team","sub_path":"Web-Application-Attack/Security_Spaghetti/modules/fingerprints/waf/sitelock.py","file_name":"sitelock.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"21"} +{"seq_id":"13206407530","text":"from re import search\nfrom sqlalchemy import func\n\nfrom eNMS.database import Session\nfrom eNMS.models import models\n\n\ndef fetch(model, allow_none=False, all_matches=False, **kwargs):\n query = Session.query(models[model]).filter_by(**kwargs)\n result = query.all() if all_matches else query.first()\n if result or allow_none:\n return result\n else:\n raise Exception(\n f\"There is no {model} in the database \"\n f\"with the following characteristics: {kwargs}\"\n )\n\n\ndef fetch_all(model, **kwargs):\n return fetch(model, allow_none=True, all_matches=True, **kwargs)\n\n\ndef count(model, **kwargs):\n return Session.query(func.count(models[model].id)).filter_by(**kwargs).scalar()\n\n\ndef get_query_count(query):\n count_query = query.statement.with_only_columns([func.count()]).order_by(None)\n return query.session.execute(count_query).scalar()\n\n\ndef objectify(model, object_list):\n return [fetch(model, id=object_id) for object_id in object_list]\n\n\ndef delete(model, allow_none=False, **kwargs):\n instance = Session.query(models[model]).filter_by(**kwargs).first()\n if allow_none and not instance:\n return None\n if hasattr(instance, \"type\") and instance.type == \"task\":\n instance.delete_task()\n serialized_instance = instance.serialized\n Session.delete(instance)\n return serialized_instance\n\n\ndef delete_all(*models):\n for model in models:\n for instance in fetch_all(model):\n delete(model, id=instance.id)\n\n\ndef export(model):\n return [instance.to_dict(export=True) for instance in fetch_all(model)]\n\n\ndef factory(cls_name, **kwargs):\n if {\"/\", '\"', \"'\"} & set(kwargs.get(\"name\", \"\")):\n raise Exception(\"Names cannot contain a slash or a quote.\")\n instance, instance_id = None, kwargs.pop(\"id\", 0)\n if instance_id:\n instance = fetch(cls_name, id=instance_id)\n elif \"name\" in kwargs:\n instance = fetch(cls_name, allow_none=True, name=kwargs[\"name\"])\n if instance:\n if kwargs.get(\"must_be_new\"):\n raise Exception(f\"There already is a {cls_name} with the same name.\")\n else:\n instance.update(**kwargs)\n else:\n instance = models[cls_name](**kwargs)\n Session.add(instance)\n return instance\n\n\ndef handle_exception(exc):\n match = search(\"UNIQUE constraint failed: (\\w+).(\\w+)\", exc)\n if match:\n return f\"There already is a {match.group(1)} with the same {match.group(2)}.\"\n else:\n return exc\n","repo_name":"arifh19/eNMS","sub_path":"eNMS/database/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"3755251152","text":"N = int(input())\n\nM = [0] * (N+1)\ncnts = [0] * (N+1)\n# print(\"M:\", M)\ndef dp(x):\n if x == 2:\n cnt = 0\n mem = []\n for num in range(10, 100):\n num_str = str(num)\n l = list(num_str)\n # print(l)\n if abs(int(l[0]) - int(l[1])) == 1:\n # print(l)\n mem.append(l)\n cnt += 1\n # print(cnt)\n # print(\"mem:\", mem)\n M[x] = mem\n cnts[x] = cnt\n return\n if x == 3:\n arr = []\n cnt = 0\n for mem in M[x-1]:\n for num in range(1, 
10):\n if abs(num - int(mem[0])) == 1:\n # print(\"mem[0]:\", mem[0])\n cnt += 1\n arr.append(num)\n # print(\"arr:\", arr)\n M[x] = arr\n cnts[x] = cnt * cnts[x-1]\n return\n if M[x] != 0:\n return M[x]\n arr = []\n cnt = 0\n for mem in M[x-1]:\n for num in range(1, 10):\n if abs(num - mem) == 1:\n # print(\"mem[0]:\", mem[0])\n cnt += 1\n arr.append(num)\n # print(\"arr:\", arr)\n M[x] = arr\n cnts[x] = cnt * cnts[x-1]\n return\n\nfor i in range(2, 11):\n dp(i)\nprint(cnts[10] % 1000000000)\n\n# print(M)\n# print(cnts)","repo_name":"KB-team3/AlgoGGang","sub_path":"이우엽/Week_11/P1562_계단수.py","file_name":"P1562_계단수.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"7721734162","text":"from numba import njit as jit\nimport numpy as np\nfrom numpy import cos, sin\n\n\n@jit\ndef rotation_matrix(angle, axis):\n assert axis in (0, 1, 2)\n angle = np.asarray(angle)\n c = cos(angle)\n s = sin(angle)\n\n a1 = (axis + 1) % 3\n a2 = (axis + 2) % 3\n R = np.zeros(angle.shape + (3, 3))\n R[..., axis, axis] = 1.0\n R[..., a1, a1] = c\n R[..., a1, a2] = -s\n R[..., a2, a1] = s\n R[..., a2, a2] = c\n return R\n\n\n@jit\ndef alinspace(start, stop=None, num=50, endpoint=True):\n \"\"\"Return increasing, evenly spaced angular values over a specified interval.\"\"\"\n # Create a new variable to avoid numba crash,\n # See https://github.com/numba/numba/issues/5661\n if stop is None:\n stop_ = start + 2 * np.pi\n elif stop <= start:\n stop_ = stop + (np.floor((start - stop) / 2 / np.pi) + 1) * 2 * np.pi\n else:\n stop_ = stop\n\n if endpoint:\n return np.linspace(start, stop_, num)\n else:\n return np.linspace(start, stop_, num + 1)[:-1]\n\n\n@jit\ndef spherical_to_cartesian(v):\n r\"\"\"Compute cartesian coordinates from spherical coordinates (norm, colat, long). This function is vectorized.\n\n .. math::\n\n v = norm \\cdot \\begin{bmatrix}\n \\sin(colat)\\cos(long)\\\\\n \\sin(colat)\\sin(long)\\\\\n \\cos(colat)\\\\\n \\end{bmatrix}\n\n Parameters\n ----------\n v : numpy.ndarray\n Spherical coordinates in 3D (norm, colat, long). Angles must be in radians.\n\n Returns\n -------\n v : numpy.ndarray\n Cartesian coordinates (x,y,z)\n\n \"\"\"\n v = np.asarray(v)\n norm_vecs = np.expand_dims(np.asarray(v[..., 0]), -1)\n vsin = np.sin(v[..., 1:3])\n vcos = np.cos(v[..., 1:3])\n x = np.asarray(vsin[..., 0] * vcos[..., 1])\n y = np.asarray(vsin[..., 0] * vsin[..., 1])\n z = np.asarray(vcos[..., 0])\n return norm_vecs * np.stack((x, y, z), axis=-1)\n\n\n@jit\ndef planetocentric_to_AltAz(theta, phi):\n r\"\"\"Defines transformation matrix to convert from Planetocentric coordinate system\n to the Altitude-Azimuth system.\n\n .. 
math::\n t\\_matrix = \\begin{bmatrix}\n -\\sin(theta) & \\cos(theta) & 0\\\\\n -\\sin(phi)\\cdot\\cos(theta) & -\\sin(phi)\\cdot\\sin(theta) & \\cos(phi)\\\\\n \\cos(phi)\\cdot\\cos(theta) & \\cos(phi)\\cdot\\sin(theta) & \\sin(phi)\n \\end{bmatrix}\n\n Parameters\n ----------\n theta: float\n Local sidereal time\n phi: float\n Planetodetic latitude\n\n Returns\n -------\n t_matrix: numpy.ndarray\n Transformation matrix\n \"\"\"\n # Transformation matrix for converting planetocentric equatorial coordinates to topocentric horizon system.\n t_matrix = np.array(\n [\n [-np.sin(theta), np.cos(theta), 0],\n [\n -np.sin(phi) * np.cos(theta),\n -np.sin(phi) * np.sin(theta),\n np.cos(phi),\n ],\n [\n np.cos(phi) * np.cos(theta),\n np.cos(phi) * np.sin(theta),\n np.sin(phi),\n ],\n ]\n )\n return t_matrix\n","repo_name":"poliastro/poliastro","sub_path":"src/poliastro/core/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":3043,"program_lang":"python","lang":"en","doc_type":"code","stars":806,"dataset":"github-code","pt":"21"} +{"seq_id":"10909346500","text":"# %%\nimport cv2\nimport numpy as np\n\ncaliL = np.load('camera_parameters/calibresultL.npy', allow_pickle=True)\ncaliR = np.load('camera_parameters/calibresultR.npy', allow_pickle=True)\ncaliDual = np.load('camera_parameters/calibresultDual.npy', allow_pickle=True)\n\n# intrinsic matrix of left camera\nleft_camera_matrix = caliL.item().get('mtx')\n# distortion coefficients of left camera\nleft_distortion = caliL.item().get('dist')\n\n# intrinsic matrix of right camera\nright_camera_matrix = caliR.item().get('mtx')\n# distortion coefficients of right camera\nright_distortion = caliR.item().get('dist')\n\n# rotation and transformation matrix of the right camera according to the left camera\nR = caliDual.item().get('r')\nT = caliDual.item().get('t')\n\n# size of images\nsize = (640, 480)\n\n\n# R1, R2 are the rotation matrices, P1, P2 are the projection matrices\n# stereoRectify is used to calculate the rectification transformation matrices\nR1, R2, P1, P2, Q, validPixROI1, validPixROI2 = cv2.stereoRectify(left_camera_matrix, left_distortion,\n right_camera_matrix, right_distortion, size, R,\n T)\n\n# map1 and map2 are remap matrix for x and y axes\n# initUndistortRectifyMap is used to calculate the remap matrix for x and y axes; used to rectify the images\nleft_map1, left_map2 = cv2.initUndistortRectifyMap(left_camera_matrix, left_distortion, R1, P1, size, cv2.CV_16SC2)\nright_map1, right_map2 = cv2.initUndistortRectifyMap(right_camera_matrix, right_distortion, R2, P2, size, cv2.CV_16SC2)\n\n","repo_name":"csl122/Stereo-Matching","sub_path":"camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38910809958","text":"#!/usr/bin/python\n\nfrom scapy.all import *\n\n\nip = input(\"Introduce la dirección IP:\\n\")\nprint(\"Realizando scan ICMP...\")\n\n'''Para realizar un ICMP scan, envíamos un paquete ICMP y estudiamos la respuesta que nos ha facilitado el sistema'''\n\npaquete=IP(dst=ip)/ICMP()\n\nrespuesta_icmp= sr1(paquete,timeout=1, verbose=0)\n\nif respuesta_icmp == None:\n\tprint(\"El host en\",ip,\"no está disponible o no existe\\n\")\nelse:\n\tprint(\"El host en\",ip,\"está disponible\\n\")\n\n\t\n'''una vez realizado el scan mediante icmp de la IP, pasamos a realizar un SYN Stealth scan o Syn Scan o Half scan. 
Nos basaremos en los 20 puertos TCP más usados que suele escanear NMAP'''\n\npuertos=[80,23,443,21,22,25,3389,110,445,139,143,53,135,3306,8080,1723,111,995,993,5900]\nabiertos=[]\nfiltrados=[]\ncerrados=[]\nif respuesta_icmp != None:\n\tprint(\"Realizando SYN Stelath scan...\")\n\tfor puerto in puertos:\n\t\tp = IP(dst=ip)/TCP(sport=RandShort(), dport=puerto, flags='S')\n\t\trespuesta = sr1(p, timeout=2)\n\t\tif(respuesta is None):\n\t\t\tfiltrados.append(puerto)\n\t\t\tcontinue;\n\t\telif respuesta.haslayer(TCP):\n\t\t\tif respuesta.getlayer(TCP).flags == 0x14:\n\t\t\t\tcerrados.append(puerto)\n\t\t\t\tcontinue;\n\t\t\telif respuesta.getlayer(TCP).flags == 0x12:\n\t\t\t\tabiertos.append(puerto)\n\t\t\t\tcontinue; \n\t\t\telif (int(respuesta.getlayer(ICMP).type)==3 and int(respuesta.getlayer(ICMP).code) in [1,2,3,9,10,13]):\n\t\t\t\tfiltrados.append(puerto)\n\t\t\t\tcontinue;\nelse:\n\tprint(\"Fin del scanner\")\n\nprint(\"Los puertos abiertos son\", abiertos)\nprint(\"Los puertos filtrados son\", filtrados)\nprint(\"Los puertos cerrados son\", cerrados)\n\t\n\n","repo_name":"Trocsamas/SSII","sub_path":"PAI11_12/synstealth_icmp.py","file_name":"synstealth_icmp.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"35908864105","text":"project = 'X-HEEP-based FPGA EMUlation Platform (FEMU)'\ncopyright = '2023, EPFL'\nauthor = 'Simone Machetti'\n\nrelease = '1.0'\nversion = '1.0.0'\n\nextensions = [\n 'sphinx.ext.duration',\n 'sphinx.ext.doctest',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.intersphinx',\n]\n\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3/', None),\n 'sphinx': ('https://www.sphinx-doc.org/en/master/', None),\n}\nintersphinx_disabled_domains = ['std']\n\ntemplates_path = ['_templates']\n\nhtml_theme = 'sphinx_rtd_theme'\n\nepub_show_urls = 'footnote'\n","repo_name":"esl-epfl/x-heep-femu","sub_path":"docs/source/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"11347940826","text":"import os\nfrom datetime import datetime\n\n\ndef remove_download_files(downloads, now, retain):\n # Numbers to keep track of how many files removed and total that can be removed\n removed = 0\n total = 0\n\n print()\n\n # Go through my downloads and attempt to delete items that are over a certain # of days old\n for file in os.listdir(downloads):\n path = downloads + file\n\n if os.path.isfile(path) and not file.startswith(\".\", 0, 1):\n stat = os.stat(path)\n\n # Can only get birth time on Mac (convenient for me)\n try:\n birth_time_ts = stat.st_birthtime\n birth_time = datetime.fromtimestamp(birth_time_ts)\n\n # Get number of days difference between current time and birth time for file\n diff = now - birth_time\n\n if diff.days > retain:\n total += 1\n\n while True:\n will_delete = input(\"Do you want to delete: {file}? 
\".format(file=file))\n will_delete.lower()\n\n if will_delete == \"yes\" or will_delete == \"y\":\n os.remove(path)\n removed += 1\n break\n elif will_delete == \"no\" or will_delete == \"n\":\n break\n else:\n continue\n except AttributeError:\n print(\"Oops!\")\n\n print(\"\\n\"\n \"Removed {removed} files out of {total} eligible files.\"\n \"\\n\".format(removed=removed, total=total))\n\n\ndef main():\n # Get the downloads folder for the user\n downloads_folder = os.path.expanduser(\"~/Downloads/\")\n\n # How many days far back should we retain downloads (change to your liking)\n retain = int(input(\"How far back (in days) do you want to not delete files? \"))\n\n # Get current time\n now = datetime.now()\n\n remove_download_files(downloads_folder, now, retain)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"bryan-vh/AutoDeleteDownloads","sub_path":"AutoDeleteDownloads.py","file_name":"AutoDeleteDownloads.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"44811360434","text":"def getnum(A):\n n=len(A)\n number=0\n for i in range(n):\n number = number^i^A[i]\n number^=n\n return number\n\n\nif __name__=='__main__':\n A=[3,2,1,4]\n print(getnum(A))\n","repo_name":"ChiragSinghai/450-Questions","sub_path":"Chirag/Leetcode_problem/missing number leetcode-268.py","file_name":"missing number leetcode-268.py","file_ext":"py","file_size_in_byte":191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19207025908","text":"#!usr/bin/python3\n# -*- coding:utf-8 -*-\nimport json\nimport requests\nimport time\nimport pymongo\nimport logging\nfrom multiprocessing.dummy import Pool as ThreadPool\n\nconnection = pymongo.MongoClient()\ntdb = connection.text\npostInfo = tdb.bilibiliVideo\nlogging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',\n datefmt='%a, %d %b %Y %H:%M:%S',\n filename='log.log',\n filemode='w+')\npool = ThreadPool(8)\n\ndef process(i):\n getStat = requests.get('http://api.bilibili.com/archive_stat/stat?aid='+str(i)).text\n stat = json.loads(getStat)['data']\n favorite = stat['favorite']\n coin = stat['coin']\n reply = stat['reply']\n view = stat['view']\n share = stat['share']\n danmaku = stat['danmaku']\n # getShow = requests.get('http://api.bilibili.com/x/elec/show?&aid='+str(i)).text\n # show4th = json.loads(getShow)['data']['list'] # 4 dicts in this list\n # showCount = json.loads(getShow)['data']['count']\n getTag = requests.get('http://api.bilibili.com/x/tag/archive/tags?aid='+str(i)).text\n try:\n tags = json.loads(getTag)['data'] # lots of dicts in this list\n totalTag = ''\n for tag in tags:\n tagName = tag['tag_name']\n totalTag += tagName+\" \"\n print(totalTag)\n getReplyCount = requests.get('http://api.bilibili.com/x/v2/reply?jsonp=jsonp&type=1&oid='+str(i)).text\n replyCount = json.loads(getReplyCount)['data']['page']['count']\n for n in range(1, 1+replyCount//20):\n getReply= requests.get('http://api.bilibili.com/x/v2/reply?jsonp=jsonp&type=1&sort=2&oid='+str(i)+'&pn='+str(n)).text\n replies = json.loads(getReply)['data']['replies']\n for content in replies:\n message = content['content']['message']\n ctime = content['ctime']\n like = content['like']\n uname = content['member']['uname']\n sex = content['member']['sex']\n mid = content['mid']\n item = {}\n item['AV号'] = i\n item['播放数'] = view\n item['弹幕数量'] = danmaku\n item['分享数量'] = 
share\n item['收藏数量'] = favorite\n item['硬币数量'] = coin\n item['视频标签'] = totalTag\n item['总评论数量'] = reply\n item['评论'] = message\n item['评论发表时间'] = time.ctime(ctime)\n item['评论点赞数量'] = like\n item['发布评论者'] = uname\n item['发布评论者ID'] = mid\n item['发布评论者性别'] = sex\n postInfo.insert(item)\n except Exception:\n logging.info('AV'+str(i)+\"错误\")\n pass\n\nresult = pool.map(process, list(range(1,9999999)))\npool.close()\npool.join()\n\n","repo_name":"Momengs/bilibiliVideoInfoCrawler","sub_path":"spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":2966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10101999982","text":"import os\r\nimport os.path\r\nfrom pathlib import Path\r\nimport re\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nimport pylab as pl\r\n\r\n\r\nimport xlwt\r\n\r\n\r\n#walter_meas\r\n\r\nDevice_name=\"ESDPMOS\"\r\n\r\nwalter_meas_path= r\"C:\\\\All_projects\\tc18d_model_assessment\\Data\\correct\\ESDPMOS\\\\\"\r\n\r\nwerner_meas_path= r\"C:\\\\All_projects\\tc18d_model_assessment\\Data\\correct\\ESD_all_correct\\ESDPMOS\\\\\"\r\n\r\nmodel_simulation_path=r\"C:\\\\All_projects\\tc18d_model_assessment\\assessment\\ESDPMOS\\LOG_add_contact_resistance_metal_small\\save_init_compa_tC18d_ESDPMOS\\data\\\\\"\r\n\r\nfullmap_point_data_path =r\"C:\\\\All_projects\\tc18d_model_assessment\\Data\\correct\\ESD_all_correct\\Point_data\\M009\\ESDPMOS\\\\\"\r\n\r\nTYPE=-1\r\n\r\n# directory_in_str=\"C:/All_projects/tc18d_model_assessment/Data/correct/ESD_all_correct/Point_data/M009/ESDNMOS\"\r\n\r\n# directory = os.fsencode(directory_in_str)\r\n\r\n#for file in os.listdir(directory):\r\n # filename = os.fsdecode(file)\r\n # if filename.endswith(\".txt\") or filename.endswith(\".py\"):\r\n # print(os.path.join(directory, filename))\r\n\r\n\r\n#directory_full_walter=\"C:\\All_projects\\tc18d_model_assessment\\Data\\correct\\ESDNMOS\"\r\nIdlin_initial_meas_die1_walter = []\r\nIdsat_initial_meas_die1_walter = []\r\n\r\nIdlin_initial_meas_die20_werner = []\r\nIdsat_initial_meas_die20_werner = []\r\n\r\nIdlin_TT=[]\r\nIdsat_TT=[]\r\n\r\nIdlin_SS=[]\r\nIdsat_SS=[]\r\n\r\nIdlin_FF=[]\r\nIdsat_FF=[]\r\n\r\nfor iter in range(1,13):\r\n Vgs = []\r\n\r\n#Walter Measurement\r\n for site in range(1,2):\r\n flname = \"M002xW6C442_W13x0{:02d}x\".format(site) + Device_name + \"x{:02d}xTP25.txt\".format(iter)\r\n print(flname)\r\n with open( walter_meas_path + flname) as f:\r\n lines_after_17=[]\r\n clean_lines=[]\r\n lines_after_17 = f.readlines()[92:93]\r\n print(lines_after_17)\r\n clean_lines = [x.strip(' ') for x in lines_after_17]\r\n for row in clean_lines:\r\n row = re.split(' +', row)\r\n # row = row.split('/s* ')\r\n # print(row)\r\n Idlin_initial_meas_die1_walter.append(row[15])\r\n Idlin_initial_meas_die1_walter2 = np.array(Idlin_initial_meas_die1_walter)\r\n print(Idlin_initial_meas_die1_walter2)\r\n\r\n with open( walter_meas_path + flname) as f:\r\n lines_after_17=[]\r\n clean_lines=[]\r\n lines_after_17 = f.readlines()[1195:1196]\r\n print(lines_after_17)\r\n clean_lines = [x.strip(' ') for x in lines_after_17]\r\n for row in clean_lines:\r\n row = re.split(' +', row)\r\n # row = row.split('/s* ')\r\n # print(row)\r\n Idsat_initial_meas_die1_walter.append(row[15])\r\n Idsat_initial_meas_die1_walter2 = np.array(Idsat_initial_meas_die1_walter)\r\n print(Idsat_initial_meas_die1_walter2)\r\n#################\r\n\r\n# Werner's Measurement\r\n\r\n for site in range(20,21):\r\n flname = 
\"M006xW6C442W13x0{:02d}x\".format(site) + Device_name + \"x{:02d}xTP25.txt\".format(iter)\r\n print(flname)\r\n with open( werner_meas_path + flname) as f:\r\n lines_after_17=[]\r\n clean_lines=[]\r\n lines_after_17 = f.readlines()[92:93]\r\n print(lines_after_17)\r\n clean_lines = [x.strip(' ') for x in lines_after_17]\r\n for row in clean_lines:\r\n row = re.split(' +', row)\r\n # row = row.split('/s* ')\r\n # print(row)\r\n Idlin_initial_meas_die20_werner.append(row[15])\r\n Idlin_initial_meas_die20_werner2 = np.array(Idlin_initial_meas_die20_werner)\r\n print(Idlin_initial_meas_die20_werner2)\r\n\r\n with open( werner_meas_path + flname) as f:\r\n lines_after_17=[]\r\n clean_lines=[]\r\n lines_after_17 = f.readlines()[1195:1196]\r\n print(lines_after_17)\r\n clean_lines = [x.strip(' ') for x in lines_after_17]\r\n for row in clean_lines:\r\n row = re.split(' +', row)\r\n # row = row.split('/s* ')\r\n # print(row)\r\n Idsat_initial_meas_die20_werner.append(row[15])\r\n Idsat_initial_meas_die20_werner2 = np.array(Idsat_initial_meas_die20_werner)\r\n print(Idsat_initial_meas_die20_werner2)\r\n\r\n # tC18d_ESDNMOS_PLOT__G1_L01_TR__IDVG_LIN_T_p025_tt_lib_25C\r\n # Model Simulation values\r\n for site in range(20,21):\r\n # flname = \"tC18d_ESDNMOS_PLOT__G{:d}_L{:02d}_OUT_IDVD_VB0_T_p025_tt_lib_25C.txt\".format(iter, iter)\r\n\r\n flname = \"tC18d_\" + Device_name + \"_PLOT__G{:d}\".format(iter) + \"_L{:02d}_TR__IDVG_LIN_T_p025_tt_lib_25C.txt\".format(iter)\r\n print(flname)\r\n with open( model_simulation_path + flname) as f:\r\n lines_after_17=[]\r\n clean_lines=[]\r\n lines_after_17 = f.readlines()[46:47]\r\n print(lines_after_17)\r\n clean_lines = [x.strip(' ') for x in lines_after_17]\r\n for row in clean_lines:\r\n row = re.split(' +', row)\r\n # row = row.split('/s* ')\r\n # print(row)\r\n Idlin_TT.append(row[1])\r\n Idlin_TT2 = np.array(Idlin_TT)\r\n\r\n Idlin_TT5 = list(map(float, Idlin_TT2))\r\n\r\n if TYPE==-1:\r\n Idlin_TT3 = [i * -1 for i in Idlin_TT5]\r\n else:\r\n Idlin_TT3 = Idlin_TT5\r\n\r\n\r\n\r\n\r\n print(Idlin_TT2)\r\n print(Idlin_TT3)\r\n # Idlin_TT5=Idlin_TT2\r\n\r\n\r\n print(Idlin_TT2)\r\n# tC18d_ESDNMOS_PLOT__G1_L01_OUT_IDVD_VB0_T_p025_tt_lib_25C\r\n flname = \"tC18d_\" + Device_name + \"_PLOT__G{:d}_\".format(iter) + \"L{:02d}_OUT_IDVD_VB0_T_p025_tt_lib_25C.txt\".format(iter)\r\n print(flname)\r\n\r\n with open( model_simulation_path + flname) as f:\r\n lines_after_17=[]\r\n clean_lines=[]\r\n lines_after_17 = f.readlines()[36:37]\r\n print(lines_after_17)\r\n clean_lines = [x.strip(' ') for x in lines_after_17]\r\n for row in clean_lines:\r\n row = re.split(' +', row)\r\n # row = row.split('/s* ')\r\n # print(row)\r\n Idsat_TT.append(row[3])\r\n Idsat_TT2 = np.array(Idsat_TT)\r\n\r\n\r\n\r\n Idsat_TT5 = list(map(float, Idsat_TT2))\r\n\r\n if TYPE == -1:\r\n Idsat_TT3 = [i * -1 for i in Idsat_TT5]\r\n else:\r\n Idsat_TT3=Idsat_TT5\r\n\r\n\r\n\r\n\r\n\r\n\r\n print(Idsat_TT2)\r\n\r\n # SS Model Sim Values\r\n\r\n flname = \"tC18d_\" + Device_name + \"_PLOT__G{:d}_\".format(iter) + \"L{:02d}_TR__IDVG_LIN_T_p025_ss_lib_25C.txt\".format(iter)\r\n print(flname)\r\n with open(model_simulation_path + flname) as f:\r\n lines_after_17 = []\r\n clean_lines = []\r\n lines_after_17 = f.readlines()[46:47]\r\n print(lines_after_17)\r\n clean_lines = [x.strip(' ') for x in lines_after_17]\r\n for row in clean_lines:\r\n row = re.split(' +', row)\r\n # row = row.split('/s* ')\r\n # print(row)\r\n Idlin_SS.append(row[1])\r\n Idlin_SS2 = np.array(Idlin_SS)\r\n\r\n Idlin_SS5 = 
list(map(float, Idlin_SS2))\r\n\r\n Idlin_SS3 = [i * -1 for i in Idlin_SS5]\r\n\r\n print(Idlin_SS5)\r\n # tC18d_ESDNMOS_PLOT__G1_L01_OUT_IDVD_VB0_T_p025_tt_lib_25C\r\n flname = \"tC18d_\" + Device_name + \"_PLOT__G{:d}_\".format(iter) + \"L{:02d}_OUT_IDVD_VB0_T_p025_ss_lib_25C.txt\".format(iter)\r\n print(flname)\r\n\r\n with open(model_simulation_path + flname) as f:\r\n lines_after_17 = []\r\n clean_lines = []\r\n lines_after_17 = f.readlines()[36:37]\r\n print(lines_after_17)\r\n clean_lines = [x.strip(' ') for x in lines_after_17]\r\n for row in clean_lines:\r\n row = re.split(' +', row)\r\n # row = row.split('/s* ')\r\n # print(row)\r\n Idsat_SS.append(row[3])\r\n Idsat_SS2 = np.array(Idsat_SS)\r\n\r\n Idsat_SS5 = list(map(float, Idsat_SS2))\r\n\r\n if TYPE == -1:\r\n Idsat_SS3 = [i * -1 for i in Idsat_SS5]\r\n else:\r\n Idsat_SS3 =Idsat_SS5\r\n\r\n\r\n print(Idsat_SS5)\r\n\r\n #FF sim Values\r\n\r\n flname = \"tC18d_\" + Device_name + \"_PLOT__G{:d}_\".format(iter) + \"L{:02d}_TR__IDVG_LIN_T_p025_ff_lib_25C.txt\".format(iter)\r\n print(flname)\r\n with open(model_simulation_path + flname) as f:\r\n lines_after_17 = []\r\n clean_lines = []\r\n lines_after_17 = f.readlines()[46:47]\r\n print(lines_after_17)\r\n clean_lines = [x.strip(' ') for x in lines_after_17]\r\n for row in clean_lines:\r\n row = re.split(' +', row)\r\n # row = row.split('/s* ')\r\n # print(row)\r\n Idlin_FF.append(row[1])\r\n Idlin_FF2 = np.array(Idlin_FF)\r\n\r\n Idlin_FF5 = list(map(float, Idlin_FF2))\r\n\r\n if TYPE == -1:\r\n Idlin_FF3 = [i * -1 for i in Idlin_FF5]\r\n else:\r\n Idlin_FF3 = Idlin_FF5\r\n\r\n\r\n\r\n\r\n print(Idlin_FF3)\r\n\r\n print(Idlin_FF2)\r\n # tC18d_ESDNMOS_PLOT__G1_L01_OUT_IDVD_VB0_T_p025_tt_lib_25C\r\n flname = \"tC18d_\" + Device_name + \"_PLOT__G{:d}_\".format(iter) + \"L{:02d}_OUT_IDVD_VB0_T_p025_ff_lib_25C.txt\".format(iter)\r\n print(flname)\r\n\r\n with open(model_simulation_path + flname) as f:\r\n lines_after_17 = []\r\n clean_lines = []\r\n lines_after_17 = f.readlines()[36:37]\r\n print(lines_after_17)\r\n clean_lines = [x.strip(' ') for x in lines_after_17]\r\n for row in clean_lines:\r\n row = re.split(' +', row)\r\n # row = row.split('/s* ')\r\n # print(row)\r\n Idsat_FF.append(row[3])\r\n Idsat_FF2 = np.array(Idsat_FF)\r\n\r\n Idsat_FF5 = list(map(float, Idsat_FF2))\r\n\r\n if TYPE == -1:\r\n Idsat_FF3 = [i * -1 for i in Idsat_FF5]\r\n else:\r\n Idsat_FF3 = Idsat_FF5\r\n\r\n\r\n\r\n\r\n print(Idsat_FF3)\r\n print(Idsat_FF5)\r\n\r\nprint(Idlin_initial_meas_die1_walter2[0])\r\nprint(Idlin_initial_meas_die1_walter2[3])\r\nprint(Idlin_initial_meas_die1_walter2[5])\r\n\r\nprint(Idsat_initial_meas_die1_walter2[0])\r\nprint(Idsat_initial_meas_die1_walter2[3])\r\nprint(Idsat_initial_meas_die1_walter2[5])\r\n\r\n\r\nprint(Idlin_initial_meas_die20_werner2[0])\r\nprint(Idlin_initial_meas_die20_werner2[3])\r\nprint(Idlin_initial_meas_die20_werner2[5])\r\n\r\nprint(Idsat_initial_meas_die20_werner2[0])\r\nprint(Idsat_initial_meas_die20_werner2[3])\r\nprint(Idsat_initial_meas_die20_werner2[5])\r\n\r\nprint(Idlin_TT2)\r\nprint(Idsat_TT2)\r\n\r\nprint(Idlin_SS2)\r\nprint(Idsat_SS2)\r\n\r\n\r\nprint(Idlin_FF2)\r\nprint(Idsat_FF2)\r\n\r\n\r\nIdlin_initial_meas_die1_walter3 = list(map(float, Idlin_initial_meas_die1_walter2))\r\nIdsat_initial_meas_die1_walter3 = list(map(float, Idsat_initial_meas_die1_walter2))\r\n\r\n\r\nIdlin_initial_meas_die20_werner3 = list(map(float, Idlin_initial_meas_die20_werner2))\r\nIdsat_initial_meas_die20_werner3 = list(map(float, 
Idsat_initial_meas_die20_werner2))\r\n\r\n\r\nprint(Idlin_TT2)\r\nprint(Idlin_TT5)\r\n\r\n\r\n\"\"\"\r\n\r\n\"\"\"\r\n#Idlin_TT3 = list(map(float, Idlin_TT5))\r\n#Idsat_TT3 = list(map(float, Idsat_TT2))\r\n\r\n\r\n#Idlin_SS3 = list(map(float, Idlin_SS2))\r\n#Idsat_SS3 = list(map(float, Idsat_SS2))\r\n\r\n\r\n#Idlin_FF3 = list(map(float, Idlin_FF2))\r\n#Idsat_FF3 = list(map(float, Idsat_FF2))\r\n\r\n\r\n\r\nIdlin_initial_meas_die1_walter=0.00185403\r\nIdsat_initial_meas_die1_walter=0.0093832\r\n\r\nIdlin_initial_meas_die20=1.7776e-3\r\nIdsat_initial_meas_die20=0.010085\r\n\r\nIdlin_initial_red_die20=0.0017191\r\nIdsat_initial_red_die20=0.010057\r\n\r\nIdlin_initial_red_die21=0.0016723\r\nIdsat_initial_red_die21=0.010053\r\n\r\nIdlin_initial_red_die22=0.0017381\r\nIdsat_initial_red_die22=0.010136\r\n\r\n\r\nIdlin_model_TT=[2.1572759e-03]\r\nIdlin_model_SS=[1.8527796e-03]\r\nIdlin_model_FF=[2.4646430e-03]\r\n\r\nIdsat_model_TT=[1.1446495e-02]\r\nIdsat_model_SS=[9.5960417e-03]\r\nIdsat_model_FF=[1.2995159e-02]\r\n\r\n\r\n\r\n# Point data Meas\r\n\r\nfor iter in range(1,13):\r\n Vgs = []\r\n Ids = []\r\n\r\n for site in range(1,40):\r\n flname = \"M009xW6C442W13x0{:02d}x\".format(site) + Device_name + \"x{:02d}xTP25.txt\".format(iter)\r\n print(flname)\r\n # path = 'C:\\\\Users\\\\Username\\\\Path\\\\To\\\\File'\r\n with open( fullmap_point_data_path + flname) as f:\r\n\r\n # with open( \"C:/All_projects/tc18d_model_assessment/Data/correct/ESD_all_correct/Point_data/M008/\" + flname) as f:\r\n lines_after_17=[]\r\n clean_lines=[]\r\n lines_after_17 = f.readlines()[46:70]\r\n # print(lines_after_17)\r\n clean_lines = [x.strip(' ') for x in lines_after_17]\r\n # print(clean_lines)\r\n\r\n\r\n for row in clean_lines:\r\n row = re.split(' +', row)\r\n # row = row.split('/s* ')\r\n # print(row)\r\n Vgs.append(row[8])\r\n Ids.append(row[15])\r\n x = np.array(Vgs)\r\n y = np.array(Ids)\r\n # print(x)\r\n # print(y)\r\n\r\n\r\n # continue\r\n # else:\r\n # continue\r\n\r\n #print(x)\r\n #print(y)\r\n print(Vgs)\r\n print(Ids)\r\n length_of_Vgs=len(Vgs)\r\n length_of_Ids=len(Ids)\r\n Vd=[]\r\n Vg=[]\r\n Idlin=[]\r\n Idsat=[]\r\n\r\n print(Ids[0])\r\n print(Ids[4])\r\n print(Ids[8])\r\n print(Ids[12])\r\n print(Ids[16])\r\n print(Ids[20])\r\n\r\n\r\n\r\n print(length_of_Vgs)\r\n #Vg=Vgs[0]\r\n #Idlin=Ids[0]\r\n\r\n print(Vg)\r\n print(Idlin)\r\n\r\n for j in range(0,length_of_Vgs):\r\n if j % 4 == 0:\r\n Vg.append(Vgs[j])\r\n\r\n for i in range(0,length_of_Ids):\r\n if i % 4 == 0:\r\n Idlin.append(Ids[i])\r\n Idsat.append(Ids[i+2])\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n # Vg = Vgs[j]\r\n\r\n\r\n\r\n print(Vg)\r\n print(Idlin)\r\n print(Idsat)\r\n\r\n\r\n x = [1.67730e-03,1.60140e-03,1.60960e-03,1.58380e-03,1.64970e-03]\r\n y = [9.92410e-03,9.54660e-03,9.57090e-03,9.49610e-03,9.90710e-03]\r\n\r\n print (x)\r\n\r\n print(y)\r\n\r\n\r\n Idlin2=list(map(float, Idlin))\r\n Idsat2=list(map(float, Idsat))\r\n\r\n Idlin_point_diff_Werner=[(x/Idlin_initial_meas_die20_werner3[iter-1]-1)*100 for x in Idlin2]\r\n Idsat_point_diff_Werner=[(x/Idsat_initial_meas_die20_werner3[iter-1]-1)*100 for x in Idsat2]\r\n\r\n\r\n print(\"Idlin_point_diff_Werner\")\r\n print(Idlin_point_diff_Werner)\r\n\r\n print(\"Idsat_point_diff_Werner\")\r\n print(Idsat_point_diff_Werner)\r\n die=[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39]\r\n\r\n fig, bx = plt.subplots()\r\n\r\n bx.scatter(die, Idsat_point_diff_Werner, s=10, c='b', marker=\"s\", label='point_fullmap')\r\n 
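# added comment (not in the original script): each marker below is one die; the value\r\n    # plotted is 100*(Idsat_die/Idsat_werner_die20 - 1), the per-die percent deviation\r\n    # from the Werner die-20 reference computed above. Note the xlabel/ylabel strings\r\n    # that follow look swapped relative to scatter(die, diff), which puts dies on x.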
plt.grid(b=True, which='both', color='black', linestyle='-')\r\n\r\n plt.xlabel('Idsat_diff_Werner_die20(%)')\r\n plt.ylabel('Dies')\r\n picname= Device_name + \"_{:02d}x_Idsat_difference_Werner.png\".format(iter)\r\n plt.title(picname)\r\n\r\n\r\n plt.savefig(picname)\r\n\r\n\r\n\r\n plt.show()\r\n\r\n\r\n\r\n\r\n\r\n fig, cx = plt.subplots()\r\n\r\n cx.scatter(die, Idlin_point_diff_Werner, s=10, c='b', marker=\"s\", label='point_fullmap')\r\n plt.grid(b=True, which='both', color='black', linestyle='-')\r\n\r\n plt.xlabel('Idlin_diff_Werner_die20(%)')\r\n plt.ylabel('Dies')\r\n picname=Device_name + \"_{:02d}x_Idlin_difference_Werner.png\".format(iter)\r\n plt.title(picname)\r\n\r\n plt.savefig(picname)\r\n\r\n\r\n\r\n plt.show()\r\n\r\n\r\n\r\n## Idlin & Idsat vs Dies\r\n\r\n fig, dx = plt.subplots()\r\n\r\n dx.scatter(die, Idsat2, s=10, c='b', marker=\"s\", label='point_fullmap')\r\n plt.grid(b=True, which='both', color='black', linestyle='-')\r\n\r\n plt.xlabel('Dies')\r\n plt.ylabel('Idsat_Dies')\r\n picname=Device_name + \"_{:02d}x_Idsat_Dies.png\".format(iter)\r\n plt.title(picname)\r\n\r\n\r\n plt.savefig(picname)\r\n\r\n\r\n\r\n plt.show()\r\n\r\n\r\n\r\n\r\n\r\n fig, ex = plt.subplots()\r\n\r\n ex.scatter(die, Idlin2, s=10, c='b', marker=\"s\", label='point_fullmap')\r\n plt.grid(b=True, which='both', color='black', linestyle='-')\r\n\r\n plt.xlabel('Dies(%)')\r\n plt.ylabel('Idlin_Dies')\r\n picname=Device_name + \"_{:02d}x_Idlin_Dies.png\".format(iter)\r\n plt.title(picname)\r\n\r\n plt.savefig(picname)\r\n\r\n\r\n\r\n plt.show()\r\n\r\n\r\n\r\n\r\n\r\n pl.plot(Idlin_point_diff_Werner, Idsat_point_diff_Werner, \"o\")\r\n\r\n for x, y, z in zip(Idlin_point_diff_Werner, Idsat_point_diff_Werner, die):\r\n pl.text(x, y, str(z), color=\"red\", fontsize=7)\r\n pl.margins(0.1)\r\n picname = Device_name + \"_{:02d}x_Idlin-Idsat_percent_diff_to_werner.png\".format(iter)\r\n pl.title(picname)\r\n pl.savefig(picname)\r\n\r\n\r\n\r\n\r\n pl.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n print(Idlin2)\r\n print(Idsat2)\r\n\r\n print (type(x))\r\n\r\n print (type(Idlin))\r\n\r\n fig, ax = plt.subplots()\r\n #ax.scatter(x, y)\r\n ax.scatter(Idlin2, Idsat2, s=10, c='b', marker=\"s\", label='point_fullmap')\r\n ax.scatter(Idlin_initial_meas_die20_werner3[iter-1], Idsat_initial_meas_die20_werner3[iter-1], s=40, c='g', marker=\"*\", label='Meas_initial_Die20')\r\n # ax.scatter(Idlin_initial_red_die20, Idsat_initial_red_die20, s=20, c='c', marker=\"+\", label='Meas_initial_Die20')\r\n # ax.scatter(Idlin_initial_red_die21, Idsat_initial_red_die21, s=20, c='c', marker=\"+\", label='Meas_initial_Die20')\r\n # ax.scatter(Idlin_initial_red_die22, Idsat_initial_red_die22, s=20, c='c', marker=\"+\", label='Meas_initial_Die20')\r\n ax.scatter(Idlin_initial_meas_die1_walter3[iter-1], Idsat_initial_meas_die1_walter3[iter-1], s=30, c='r', marker=\"D\", label='Meas_initial_Die20')\r\n ax.scatter(Idlin_TT3[iter-1], Idsat_TT3[iter-1], s=70, c='lime', marker=\"D\", label='Meas_initial_Die20')\r\n ax.scatter(Idlin_SS3[iter-1], Idsat_SS3[iter-1], s=70, c='lime', marker=\"D\", label='Meas_initial_Die20')\r\n ax.scatter(Idlin_FF3[iter-1], Idsat_FF3[iter-1], s=70, c='lime', marker=\"D\", label='Meas_initial_Die20')\r\n\r\n\r\n #plt.xlim(0.001, 0.0018)\r\n #plt.ylim(0.008, 0.0105)\r\n #ax.ticklabel_format(useOffset=False)\r\n plt.ticklabel_format(style='sci', axis='x', scilimits=(0.0005, 0.0098))\r\n plt.ticklabel_format(style='sci', axis='y', scilimits=(0.0005, 0.0405))\r\n\r\n\r\n plt.xlabel('Idlin(A)')\r\n 
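# added comment (not in the original script): this is the Idlin-vs-Idsat correlation\r\n    # scatter; TYPE == -1 marks the PMOS device, so both axes are inverted just below\r\n    # to keep increasing drive-current magnitude pointing up and to the right.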
plt.ylabel('Idsat(A)')\r\n\r\n    if TYPE==-1:\r\n        plt.gca().invert_yaxis()\r\n        plt.gca().invert_xaxis()\r\n\r\n\r\n\r\n\r\n\r\n\r\n    # plt.legend([\"Point_measurement\", \"Remeas_Werner_die20\", \"Remeas_Werner_reduced_die20\", \"Remeas_Werner_reduced_die21\", \"Remeas_Werner_reduced_die22\", \"Remeas_Walter_die1\"], loc=\"lower right\")\r\n\r\n    plt.legend([\"Point_measurement\", \"Remeas_werner_die20\", \"Remeas_walter_die01\" ,\"Model-TT(contact res=2)\"], loc=\"lower right\")\r\n\r\n\r\n    picname=Device_name + \"_{:02d}xTP25.png\".format(iter)\r\n    plt.title(picname)\r\n\r\n\r\n    plt.savefig(picname)\r\n    plt.grid()\r\n    plt.show()\r\n\r\n\r\n\r\n    wb = xlwt.Workbook()\r\n    ws = wb.add_sheet('Sheet 1')\r\n\r\n    first_row = 0\r\n    second_row=1\r\n\r\n    # write Idlin across the first row and Idsat across the second row\r\n    for index, item in enumerate(Idlin):\r\n        ws.write(first_row, index, item)\r\n\r\n    for index, item in enumerate(Idsat):\r\n        ws.write(second_row, index, item)\r\n\r\n\r\n    # no .png here: appending '.xls' below would otherwise save the workbook as .png.xls\r\n    excelname=Device_name + \"_{:02d}xTP25\".format(iter)\r\n    wb.save(excelname + '.xls')\r\n","repo_name":"karnatipenchalarohith/Python_Plot_data_multiple_files","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":19801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40285602221","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nMass saturation ratio of water vapour with respect to ice\nUsing formula from K. Emanuel book\n\nCreated on Sat Mar 31 19:34:58 2018\n\n@author: Bernard Legras\n\"\"\"\nimport numpy as np\n# Calculation of the saturation mixing ratio from actual temperature and pressure\ndef satratio(p,T):\n    \"\"\" Calculate the mass saturation ratio from pressure (in Pa) and temperature \n    (in K). 
Output in kg/kg \"\"\"\n estar = 1.0008*np.exp(23.33086-(6111.72784/T)+0.15215*np.log(T))\n satr = 0.622 * estar/(0.01*p-estar)\n return satr","repo_name":"hugolestrelin/STC","sub_path":"pylib/satratio.py","file_name":"satratio.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24248236542","text":"import numpy as np\nimport torch\nimport torch.nn as nn\n\n\nfrom .components.bijections import (\n FlipBijection,\n MADEBijection,\n BatchNormBijection,\n ViewBijection,\n ConditionalAffineBijection,\n CoupledRationalQuadraticSplineBijection,\n AutoregressiveRationalQuadraticSplineBijection,\n LULinearBijection,\n RandomChannelwisePermutationBijection\n)\nfrom .components.densities import (\n DiagonalGaussianDensity,\n DiagonalGaussianConditionalDensity,\n get_model_gaussian_mixture_density,\n VIDensity,\n BaseConditionalVIPosterior,\n BaseUnconditionalVIPosterior,\n BijectionVIPosterior,\n CifVIPosterior,\n BayesianFactorModel,\n UnconditionalVITarget,\n BernoulliConditionalDensity,\n SplitDensity\n)\nfrom .components.couplers import (\n IndependentCoupler,\n ChunkedSharedCoupler\n)\nfrom .components.networks import (\n ConstantNetwork,\n get_mlp,\n get_resnet,\n OneLayerCNN,\n VAEOneLayerDecoder,\n TupleMLP,\n TupleConvnetUpscaleVector,\n TupleResnetVectorizeImage,\n ConvEncoder,\n ConvDecoder\n)\n\n\n_DTYPE = torch.get_default_dtype()\n\n\ndef get_vi_density(\n schema,\n latent_shape,\n targets\n):\n layer_config = schema[0]\n schema_tail = schema[1:]\n\n assert layer_config[\"type\"] == \"vi-head\", f\"Invalid layer {layer_config['type']}\"\n\n vi_posterior = get_density_recursive(schema_tail, latent_shape)\n\n density = VIDensity(\n vi_posterior=vi_posterior,\n target=targets[\"model\"],\n groundtruth_target=targets[\"groundtruth\"]\n )\n return density\n\n\ndef get_density_recursive(\n schema,\n x_shape\n):\n # TODO: We could specify this explicitly to allow different prior distributions\n if not schema:\n return get_standard_gaussian_density(x_shape=x_shape)\n\n layer_config = schema[0]\n schema_tail = schema[1:]\n\n if layer_config[\"type\"] == \"split\":\n split_x_shape = (x_shape[0] // 2, *x_shape[1:])\n return SplitDensity(\n density_1=get_density_recursive(\n schema=schema_tail,\n x_shape=split_x_shape\n ),\n density_2=get_standard_gaussian_density(x_shape=split_x_shape),\n dim=1\n )\n\n elif layer_config[\"type\"] == \"vi-base\":\n return get_vi_base_density(\n layer_config=layer_config,\n latent_shape=x_shape\n )\n\n else:\n return get_bijection_density(\n layer_config=layer_config,\n schema_tail=schema_tail,\n x_shape=x_shape\n )\n\n\ndef get_bijection_density(layer_config, schema_tail, x_shape):\n # XXX: For vi problems, x_shape is the shape of the latent dim\n bijection = get_bijection(layer_config=layer_config, x_shape=x_shape)\n\n prior = get_density_recursive(\n schema=schema_tail,\n x_shape=bijection.z_shape\n )\n\n if layer_config.get(\"num_u_channels\", 0) == 0:\n return BijectionVIPosterior(\n bijection=bijection,\n vi_prior=prior\n )\n\n else:\n if layer_config[\"amortized\"]:\n r_u_in_shape = (x_shape, layer_config[\"data_shape\"])\n else:\n r_u_in_shape = x_shape\n\n return CifVIPosterior(\n bijection=bijection,\n vi_prior=prior,\n q_u_given_w=get_conditional_density(\n num_channels_per_output=layer_config[\"num_u_channels\"],\n coupler_config=layer_config[\"p_coupler\"],\n input_shape=x_shape\n ),\n r_u_given_z=get_conditional_density(\n 
num_channels_per_output=layer_config[\"num_u_channels\"],\n coupler_config=layer_config[\"q_coupler\"],\n input_shape=r_u_in_shape\n ),\n amortized=layer_config[\"amortized\"]\n )\n\n\ndef get_standard_gaussian_density(x_shape):\n return DiagonalGaussianDensity(\n mean=torch.zeros(x_shape),\n log_stddev=torch.zeros(x_shape),\n num_fixed_samples=64\n )\n\n\ndef get_bijection(\n layer_config,\n x_shape\n):\n if layer_config[\"type\"] == \"flatten\":\n return ViewBijection(x_shape=x_shape, z_shape=(int(np.prod(x_shape)),))\n\n elif layer_config[\"type\"] == \"made\":\n assert len(x_shape) == 1\n return MADEBijection(\n num_input_channels=x_shape[0],\n hidden_channels=layer_config[\"hidden_channels\"],\n activation=get_activation(layer_config[\"activation\"])\n )\n\n elif layer_config[\"type\"] == \"batch-norm\":\n return BatchNormBijection(\n x_shape=x_shape,\n per_channel=layer_config[\"per_channel\"],\n apply_affine=layer_config[\"apply_affine\"],\n momentum=layer_config[\"momentum\"]\n )\n\n elif layer_config[\"type\"] == \"cond-affine\":\n return ConditionalAffineBijection(\n x_shape=x_shape,\n coupler=get_coupler(\n input_shape=(layer_config[\"num_u_channels\"], *x_shape[1:]),\n num_channels_per_output=x_shape[0],\n config=layer_config[\"st_coupler\"]\n )\n )\n\n elif layer_config[\"type\"] == \"flip\":\n return FlipBijection(x_shape=x_shape, dim=1)\n\n elif layer_config[\"type\"] == \"linear\":\n assert len(x_shape) == 1\n return LULinearBijection(num_input_channels=x_shape[0])\n\n elif layer_config[\"type\"] == \"rand-channel-perm\":\n return RandomChannelwisePermutationBijection(x_shape=x_shape)\n\n elif layer_config[\"type\"] == \"nsf-ar\":\n assert len(x_shape) == 1\n return AutoregressiveRationalQuadraticSplineBijection(\n num_input_channels=x_shape[0],\n num_hidden_layers=layer_config[\"num_hidden_layers\"],\n num_hidden_channels=layer_config[\"num_hidden_channels\"],\n num_bins=layer_config[\"num_bins\"],\n tail_bound=layer_config[\"tail_bound\"],\n activation=get_activation(layer_config[\"activation\"]),\n dropout_probability=layer_config[\"dropout_probability\"]\n )\n\n elif layer_config[\"type\"] == \"nsf-c\":\n assert len(x_shape) == 1\n return CoupledRationalQuadraticSplineBijection(\n num_input_channels=x_shape[0],\n num_hidden_layers=layer_config[\"num_hidden_layers\"],\n num_hidden_channels=layer_config[\"num_hidden_channels\"],\n num_bins=layer_config[\"num_bins\"],\n tail_bound=layer_config[\"tail_bound\"],\n activation=get_activation(layer_config[\"activation\"]),\n dropout_probability=layer_config[\"dropout_probability\"],\n reverse_mask=layer_config[\"reverse_mask\"]\n )\n\n else:\n assert False, f\"Invalid layer type {layer_config['type']}\"\n\n\ndef get_conditional_density(\n num_channels_per_output,\n coupler_config,\n input_shape\n):\n return DiagonalGaussianConditionalDensity(\n coupler=get_coupler(\n input_shape=input_shape,\n num_channels_per_output=num_channels_per_output,\n config=coupler_config\n )\n )\n\n\ndef get_coupler(\n input_shape,\n num_channels_per_output,\n config\n):\n if config[\"independent_nets\"]:\n return get_coupler_with_independent_nets(\n input_shape=input_shape,\n num_channels_per_output=num_channels_per_output,\n shift_net_config=config[\"shift_net\"],\n log_scale_net_config=config[\"log_scale_net\"]\n )\n\n else:\n return get_coupler_with_shared_net(\n input_shape=input_shape,\n num_channels_per_output=num_channels_per_output,\n net_config=config[\"shift_log_scale_net\"]\n )\n\n\ndef get_coupler_with_shared_net(\n input_shape,\n 
num_channels_per_output,\n net_config\n):\n return ChunkedSharedCoupler(\n shift_log_scale_net=get_coupler_net(\n input_shape=input_shape,\n num_output_channels=2*num_channels_per_output,\n net_config=net_config\n )\n )\n\n\ndef get_coupler_with_independent_nets(\n input_shape,\n num_channels_per_output,\n shift_net_config,\n log_scale_net_config\n):\n return IndependentCoupler(\n shift_net=get_coupler_net(\n input_shape=input_shape,\n num_output_channels=num_channels_per_output,\n net_config=shift_net_config\n ),\n log_scale_net=get_coupler_net(\n input_shape=input_shape,\n num_output_channels=num_channels_per_output,\n net_config=log_scale_net_config\n )\n )\n\n\ndef get_coupler_net(input_shape, num_output_channels, net_config):\n num_input_channels = input_shape[0]\n\n if net_config[\"type\"] == \"mlp\":\n assert len(input_shape) == 1\n return get_mlp(\n num_input_channels=num_input_channels,\n hidden_channels=net_config[\"hidden_channels\"],\n num_output_channels=num_output_channels,\n activation=get_activation(net_config[\"activation\"])\n )\n\n elif net_config[\"type\"] == \"resnet\":\n assert len(input_shape) == 3\n return get_resnet(\n num_input_channels=num_input_channels,\n hidden_channels=net_config[\"hidden_channels\"],\n num_output_channels=num_output_channels\n )\n\n elif net_config[\"type\"] == \"constant\":\n value = torch.full((num_output_channels, *input_shape[1:]), net_config[\"value\"])\n return ConstantNetwork(value=value, fixed=net_config[\"fixed\"])\n\n elif net_config[\"type\"] == \"identity\":\n assert num_output_channels == num_input_channels\n return lambda x: x\n\n elif net_config[\"type\"] == \"amortized-coupler\":\n assert len(input_shape) == 2\n if net_config[\"structure\"] == \"mlp\":\n net = TupleMLP\n\n elif net_config[\"structure\"] == \"vector-to-image\":\n net = TupleConvnetUpscaleVector\n\n elif net_config[\"structure\"] == \"image-to-vector\":\n net = TupleResnetVectorizeImage\n\n else:\n assert False, f\"Invalid net structure {net_config['structure']}\"\n\n return net(\n input_shapes=input_shape,\n hidden_channels=net_config[\"hidden_channels\"],\n num_output_channels=num_output_channels,\n activation=get_activation(net_config[\"activation\"])\n )\n\n else:\n assert False, f\"Invalid net type {net_config['type']}\"\n\n\ndef get_activation(name):\n if name == \"tanh\":\n return nn.Tanh\n elif name == \"relu\":\n return nn.ReLU\n else:\n assert False, f\"Invalid activation {name}\"\n\n\ndef get_vi_base_density(layer_config, latent_shape):\n # XXX: Does not handle multi-indexed latents\n latent_dim = latent_shape[0]\n\n if layer_config[\"amortized\"]:\n if layer_config.get(\"vae_one_layer\", False):\n return BaseConditionalVIPosterior(\n prior_density=DiagonalGaussianConditionalDensity(\n coupler=ChunkedSharedCoupler(\n shift_log_scale_net=OneLayerCNN(\n input_shape=layer_config[\"data_shape\"],\n output_dim=latent_shape[0]*2,\n num_hidden_channels=layer_config[\"num_hidden_channels\"],\n kernel_size=layer_config[\"kernel_size\"],\n stride=layer_config[\"stride\"],\n activation=get_activation(layer_config[\"activation\"])\n )\n )\n )\n )\n\n elif layer_config.get(\"vae_large\", False):\n return BaseConditionalVIPosterior(\n prior_density=DiagonalGaussianConditionalDensity(\n coupler=ChunkedSharedCoupler(\n shift_log_scale_net=ConvEncoder(\n context_features=latent_dim*2,\n channels_multiplier=layer_config[\"channels_multiplier\"]\n )\n )\n )\n )\n\n else:\n return BaseConditionalVIPosterior(\n prior_density=get_conditional_density(\n 
num_channels_per_output=latent_dim,\n coupler_config=layer_config[\"coupler\"],\n input_shape=layer_config[\"data_shape\"]\n )\n )\n\n elif layer_config[\"target\"] == \"bayesian-neural-net\":\n return BaseUnconditionalVIPosterior(\n prior_density=get_standard_gaussian_density(\n x_shape=latent_shape\n )\n )\n\n else:\n mean = torch.zeros(latent_shape, dtype=_DTYPE)\n log_stddev = np.log(layer_config[\"stddev\"]) + torch.zeros(latent_shape, dtype=_DTYPE)\n return BaseUnconditionalVIPosterior(\n prior_density=DiagonalGaussianDensity(\n mean=mean.requires_grad_(layer_config[\"learnable_mean\"]),\n log_stddev=log_stddev.requires_grad_(layer_config[\"learnable_stddev\"])\n )\n )\n\n\ndef get_vi_targets(layer_config, latent_shape):\n if layer_config[\"target\"] == \"mog-lattice\":\n groundtruth = get_model_gaussian_mixture_density(\n layer_config=layer_config,\n latent_shape=latent_shape,\n groundtruth=True\n )\n # XXX: No distinction between groundtruth and model here\n gt_and_model = UnconditionalVITarget(groundtruth)\n return {\n \"groundtruth\": gt_and_model,\n \"model\": gt_and_model\n }\n\n elif layer_config[\"target\"] in [\"vae-one-layer\", \"vae-large\"]:\n prior = get_standard_gaussian_density(latent_shape)\n\n if layer_config[\"target\"] == \"vae-one-layer\":\n likelihood = BernoulliConditionalDensity(\n coupler=VAEOneLayerDecoder(\n latent_dim=latent_shape[0],\n data_shape=layer_config[\"data_shape\"],\n num_hidden_channels=layer_config[\"num_hidden_channels\"],\n kernel_size=layer_config[\"kernel_size\"],\n stride=layer_config[\"stride\"]\n )\n )\n\n else:\n likelihood = BernoulliConditionalDensity(\n coupler=ConvDecoder(\n latent_features=latent_shape[0],\n channels_multiplier=layer_config[\"channels_multiplier\"]\n )\n )\n\n model = BayesianFactorModel(\n prior=prior,\n likelihood=likelihood\n )\n\n return {\n \"groundtruth\": None,\n \"model\": model\n }\n\n else:\n assert False, f\"Invalid VI target {layer_config['target']}\"\n\n\ndef get_nn_param_list(list_of_inits, requires_grad=False):\n list_of_params = [\n nn.Parameter(torch.tensor(p, dtype=_DTYPE), requires_grad=requires_grad)\n for p in list_of_inits\n ]\n return nn.ParameterList(list_of_params)\n","repo_name":"anthonycaterini/cif-vi","sub_path":"cif/models/factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":14867,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"14480327379","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport tempfile\nimport subprocess\n\nif os.name == 'posix' and sys.stderr.isatty():\n WARN_BEGIN = '\\033[93;40m'\n WARN_END = '\\033[0m'\nelse:\n WARN_BEGIN = ''\n WARN_END = ''\n\ndef warn(*args, **kwargs):\n print(WARN_BEGIN + \"Warning: \", *args, WARN_END, file=sys.stderr, **kwargs)\n\ndef tohex(value):\n if type(value) == int:\n return hex(value)\n elif type(value) == bytes or type(value) == bytearray:\n return value.hex()\n\n\n# Command used here is from https://packages.debian.org/stable/lzma-alone\n# python lzma library compress data as stream with unknown size (0xFFFFFFFFFFFFFFFF)\n# Fix it if possible to do with standard lzma library\ndef lzma_compress(data, dict_bits=19):\n REBUILD_LZMA_COMMAND = ['lzma_alone', '-d%d' % dict_bits, '-so', 'e']\n with tempfile.TemporaryDirectory() as tmpdirname:\n inpath = os.path.join(tmpdirname, \"in.bin\")\n with open(inpath, 'wb') as infile:\n infile.write(data)\n p = subprocess.Popen(REBUILD_LZMA_COMMAND + [inpath], stdout=subprocess.PIPE)\n 
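# communicate() blocks until lzma_alone exits and returns its whole stdout;\n # using it here (rather than p.stdout.read() after p.wait()) avoids a deadlock\n # when the child fills the pipe buffer.\n 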
outs, _ = p.communicate()\n return outs\n\ndef align_bytes(b, alignment):\n l = len(b)\n rem = l % alignment\n if rem != 0:\n # pad with zero bytes up to the next multiple of the alignment\n return b + b'\\0' * (alignment - rem)\n return b\n","repo_name":"syschmod/dlink_patch_utils","sub_path":"dlink_utils.py","file_name":"dlink_utils.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"6785313305","text":"import pandas as pd\nfrom transformers import AutoTokenizer\nfrom datasets import Dataset, DatasetDict\nfrom sklearn.model_selection import train_test_split\nimport torch\nimport torch.utils.data as data_utils\n\n\ndef _make_dataloader_transformers(data_path, model_name, test_size: float = 0.1, max_length: int = 128,\n sample_size: int = 50000):\n \"\"\"\n Creates tokenized_dataset for seq2seq for transformers model\n :param data_path: path to the input .csv file\n :param model_name: model name\n :param test_size: size of the validation part\n :param max_length: maximum length of the tokenized sequence\n :param sample_size: how many elements will be used for training\n \"\"\"\n print('Creating dataloader...')\n df = pd.read_csv(data_path)\n tokenizer = AutoTokenizer.from_pretrained(model_name)\n prefix = \"Make this sentence non-toxic: \"\n\n def preprocess_function(examples):\n inputs = [prefix + example for example in examples[\"toxic\"]]\n targets = examples[\"detoxified\"]\n model_inputs = tokenizer(inputs, text_target=targets, max_length=max_length, truncation=True)\n return model_inputs\n\n dataset = Dataset.from_pandas(df[['toxic', 'detoxified']]).select(range(sample_size))\n train_dataset, validation_dataset = dataset.train_test_split(test_size=test_size).values()\n\n dd = DatasetDict({\"train\": train_dataset, \"test\": validation_dataset})\n tokenized_dataset = dd.map(preprocess_function, batched=True)\n\n print('Dataloader created')\n return tokenized_dataset\n\n\ndef _make_dataloader_pytorch(data_path, model_name, test_size: float = 0.1, batch_size: int = 512,\n random_seed: int | None = 0, device: str = 'cpu'):\n \"\"\"\n Creates dataloader for pytorch model\n :param data_path: path to the input .csv file\n :param model_name: model name\n :param test_size: size of the validation part\n :param batch_size: batch size for the dataloader\n :param random_seed: parameter responsible for reproducible results\n :param device: which device to train the model on\n \"\"\"\n print('Creating dataloader...')\n df = pd.read_csv(data_path)\n threshold = 0.5\n\n df['tox_level'] = df['tox_level'].apply(lambda x: 1 if x > threshold else 0)\n tokenizer = AutoTokenizer.from_pretrained(model_name)\n\n def preprocessing_stage(sample):\n # in the preprocessing phase, I convert the input text to the list of tokens\n model_inputs = tokenizer(sample['text'], padding='max_length', max_length=256, truncation=True)\n return model_inputs['input_ids']\n\n df['input_ids'] = df.apply(lambda x: preprocessing_stage(x), axis=1)\n df.drop(columns=['text'], inplace=True)\n train, val = train_test_split(\n df, stratify=df['tox_level'], test_size=test_size, random_state=random_seed\n )\n\n def collate_batch(batch):\n text_list, toxicity_list = [], []\n for _toxicity, _text in batch:\n text_list.append(_text)\n toxicity_list.append(_toxicity)\n return torch.LongTensor(text_list).to(device), torch.FloatTensor(toxicity_list).to(device)\n\n train_dataloader = data_utils.DataLoader(\n train.to_numpy(), batch_size=batch_size, shuffle=True, collate_fn=collate_batch\n )\n\n val_dataloader = data_utils.DataLoader(\n 
val.to_numpy(), batch_size=batch_size, shuffle=False, collate_fn=collate_batch\n )\n print('Dataloader created')\n return train_dataloader, val_dataloader\n","repo_name":"slewie/TextDetoxification","sub_path":"src/data/make_dataloader.py","file_name":"make_dataloader.py","file_ext":"py","file_size_in_byte":3489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37704402119","text":"def uniqueOccurrences(arr):\n \"\"\"\n :type arr: List[int]\n :rtype: bool\n \"\"\"\n # approach: count occurences and check if numbers of occurrences are the same\n \n # create a dictionary to hold number of occurences\n visited = {}\n\n for n in arr:\n visited[n] = visited.get(n, 0) + 1\n\n print(\"visited:\", visited)\n\n values = set()\n for key, value in visited.items():\n if value in values:\n return False\n values.add(value)\n \n return True\n \n \n\nif __name__ == \"__main__\":\n arr = [1,2,2,1,1,3]\n print(uniqueOccurrences(arr))\n # arr2 = [1,2]\n # print(uniqueOccurrences(arr2))\n # arr3 = [-3,0,1,-3,1,1,1,-3,10,0]\n # print(uniqueOccurrences(arr3))","repo_name":"zZestyy/LeetCode","sub_path":"other/01-easy/1207-UniqueNumberOfOccurences.py","file_name":"1207-UniqueNumberOfOccurences.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40119502854","text":"from connect_db import connect_db\n\n\n# query data from the database\ndef query_data(sql):\n db = connect_db()\n cursor = db.cursor()\n cursor.execute(sql)\n data = cursor.fetchall()\n db.close()\n return data","repo_name":"appletime81/QRCodeServerSystem","sub_path":"ConnectDB/query_data.py","file_name":"query_data.py","file_ext":"py","file_size_in_byte":218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18192277134","text":"import os\nimport random\nimport string\nimport threading\nimport time\nimport uuid\n\nimport coreapi\nfrom coreapi.utils import DownloadedFile, File\nfrom django.conf import settings\n\nfrom apps.game.models import TeamSubmission, Match\n\ntest_json = os.path.join(settings.BASE_DIR, 'apps', 'game', 'code_log.json')\n\n\ndef random_token():\n chars = string.ascii_letters + string.digits\n return ''.join((random.choice(chars)) for i in range(15))\n\n\ndef is_compile_report(game):\n return TeamSubmission.objects.filter(infra_token=game[\"token\"]).exists()\n\n\ndef pull_reports():\n # Requests latest results from the infrastructure and updates them\n\n submits = []\n matches = []\n\n games = [] # Request updates from the infrastructure.\n\n for game in games:\n token = game[\"token\"]\n\n if is_compile_report(game):\n compilation_result(game)\n else:\n match_results(game)\n\n\ndef compilation_result(compile_result):\n time.sleep(0.2) # one second delay for testing ... (Database errors may occur\n\n # Returns compilation results.\n\n token = compile_result[\"run_id\"]\n success = compile_result[\"success\"]\n errors = \"\"\n parameters = {}\n\n if success is True:\n errors = \"ok\"\n else:\n parameters = {}\n errors = \"Error occurred\" # TODO : fix errors with the infrastructure\n\n TeamSubmission.objects.filter(infra_token=token).update(infra_compile_message=errors)\n\n\ndef match_results(match):\n time.sleep(0.2) # one second delay for testing ... 
(Database errors may occur\n\n # Return matches results.\n\n token = match[\"run_id\"]\n success = match[\"success\"]\n errors = \"\"\n parameters = {\n 'code_compiled_zip': random_token(),\n 'code_log': random_token(),\n 'client1_log': random_token(),\n 'client2_log': random_token()\n }\n\n if success is True:\n errors = \"ok\"\n else:\n parameters = {}\n errors = \"Error occurred\" # TODO : fix errors with the infrastructure\n\n Match.objects.filter(infra_token=token).update(infra_match_message=errors)\n\n #TODO: this function is incomplete\n\n\"\"\"\n **** Infrastructure API Functions ****\n\"\"\"\n\n\ndef create_infra_client():\n credentials = {settings.INFRA_IP: 'Token {}'.format(settings.INFRA_AUTH_TOKEN)}\n transports = [coreapi.transports.HTTPTransport(credentials=credentials)]\n client = coreapi.Client(transports=transports)\n schema = client.get(settings.INFRA_API_SCHEMA_ADDRESS)\n return client, schema\n\n\ndef upload_file(file):\n return random_token()\n\n\ndef download_file(file_token):\n return DownloadedFile(open(test_json, mode='r+b'), test_json, False)\n\n\ndef compile_submissions(submissions):\n compile_details = []\n for submission in submissions:\n compile_details.append({\n 'success': True,\n 'run_id': random_token()\n })\n return compile_details\n\n\ndef run_matches(matches):\n matches_details = []\n for match in matches:\n matches_details.append({\n 'success': True,\n 'run_id': random_token()\n })\n return matches_details\n","repo_name":"SharifAIChallenge/aic_site","sub_path":"apps/game/functions_test.py","file_name":"functions_test.py","file_ext":"py","file_size_in_byte":3103,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"14250278668","text":"#!/usr/bin/env python3\n\nprefix = [\"DEBUG\", \"WARNING\", \"ERROR\", \"MESSAGE\"]\n\ndef ShowChars(num, token):\n return \"\\t\" + (token * (num + 4))\n \ndef Show(ss, message):\n if(ss < 1):\n ss = 1\n if(ss > len(prefix)):\n ss = len(prefix)\n message = prefix[ss - 1] + \": \" + message\n xx = len(message)\n stars = ShowChars(xx, '*')\n print(stars)\n print(\"\\t* \" + message + \" *\")\n print(stars)\n\ndef Append(zpre):\n prefix.append(zpre)\n\nAppend(\"TEST\")\nShow(999, \"The is a MESSAGE\")\nShow(1, \"Doh!\")\n\nfor dat in prefix:\n print(dat)\n \n\n","repo_name":"soft9000/Python1000","sub_path":"Python1100/Study/MyBannerList.py","file_name":"MyBannerList.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18973805689","text":"from typing import List\nclass Solution:\n def sumOddLengthSubarrays(self, arr: List[int]) -> int:\n res = 0\n for subarray_length in range(1, len(arr)+1, 2):\n temp_2 = self.subArrayCnt(arr, subarray_length)\n res += temp_2\n return res\n \n def subArrayCnt(self, arr, subLen):\n total = 0\n for i in range(len(arr) - subLen + 1):\n temp = arr[i: i + subLen]\n total += sum(temp) \n return total\n\ns = Solution()\nprint(s.subArrayCnt([1, 4, 2, 5, 3], 3))\nprint(s.sumOddLengthSubarrays([1, 4, 2, 5, 3]))","repo_name":"miayuxin/leetcode","sub_path":"No.1588_Sum of All Odd Length Subarrays.py","file_name":"No.1588_Sum of All Odd Length Subarrays.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"73692626348","text":"#!/usr/bin/python\n\nimport string\nfrom my_pkg.bin_con import *\nfrom my_pkg.set_prt import *\n\nwhile True:\n\top = 
int(input('Select menu: 1)conversion 2)union/intersection 3)exit ? '))\n\tif op == 1:\n\t\tinb = input('input binary number : ')\n\t\tconvert(inb)\n\n\telif op == 2:\n\t\tstr1 = input('1st list: ')\n\t\tstr2 = input('2nd list: ')\n\t\tsetprt(str1,str2)\n\n\telif op == 3:\n\t\tprint('exit the program...')\n\t\tbreak\n\n\telse:\n\t\tprint('Error, option is not found\\n')\n\t\n","repo_name":"eunha812/OSP_2_2019113754","sub_path":"py_lab2/myprog_pkg.py","file_name":"myprog_pkg.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71005939307","text":"import pygame as pg\r\nfrom settings import *\r\nfrom objects import *\r\n\r\nclass Game:\r\n def __init__(self):\r\n pg.init()\r\n self.canvas = pg.display.set_mode((WIDTH, HEIGHT))\r\n pg.display.set_caption(\"Super Crate Box\")\r\n self.clock = pg.time.Clock()\r\n self.running = True\r\n self.font_name = pg.font.match_font(FONT_NAME)\r\n\r\n def new(self):\r\n self.score = 0\r\n self.crate_spawn_poss = []\r\n self.blocks = pg.sprite.Group()\r\n self.weapons = {}\r\n self.enemies = pg.sprite.Group()\r\n self.projectiles = pg.sprite.Group()\r\n self.enemies.add(Enemy(self.canvas, self.blocks, self.projectiles))\r\n self.crates = pg.sprite.Group()\r\n self.init_level()\r\n self.crates.add(Crate(random.choice(self.crate_spawn_poss)))\r\n self.player = Player(self.canvas, \"img\\\\player.png\", START_POS_X, START_POS_Y, self.blocks, self.enemies, self.crates)\r\n self.init_weapons()\r\n self.player.weapon = self.weapons[\"pistol\"]\r\n self.player_g = pg.sprite.Group()\r\n self.player_g.add(self.player)\r\n self.playing = True\r\n pg.display.flip()\r\n while self.playing:\r\n self.run()\r\n\r\n def run(self):\r\n self.clock.tick(FPS)\r\n self.events()\r\n self.update()\r\n self.fill()\r\n self.draw()\r\n\r\n def update(self):\r\n self.player.update()\r\n self.blocks.update()\r\n self.enemies.update()\r\n self.projectiles.update()\r\n self.crates.update()\r\n #print(self.clock.get_fps())\r\n\r\n def events(self):\r\n for event in pg.event.get():\r\n if event.type == pg.QUIT:\r\n self.playing = False\r\n self.running = False\r\n if event.type == SPAWN_ENEMY:\r\n self.enemies.add(Enemy(self.canvas, self.blocks, self.projectiles))\r\n if event.type == PLAYER_DEATH:\r\n self.playing = False\r\n if event.type == ENEMY_DEATH:\r\n self.enemies.remove(event.caller[0])\r\n self.projectiles.remove(event.caller[1])\r\n if event.type == BULLET_HIT:\r\n self.projectiles.remove(event.caller)\r\n if event.type == RELOAD:\r\n self.player.can_shoot = True\r\n if event.type == CRATE_PICKUP:\r\n self.crates.remove(event.caller)\r\n self.crates.add(Crate(random.choice(self.crate_spawn_poss)))\r\n self.player.weapon = self.weapons[random.choice(list(self.weapons.keys()))]\r\n self.score +=1\r\n\r\n def fill(self):\r\n self.canvas.fill(BG)\r\n\r\n def draw(self):\r\n self.blocks.draw(self.canvas)\r\n self.enemies.draw(self.canvas)\r\n self.projectiles.draw(self.canvas)\r\n self.crates.draw(self.canvas)\r\n self.player_g.draw(self.canvas)\r\n self.do_text(str(self.score), 30, BLACK, WIDTH / 2, 30)\r\n pg.display.flip()\r\n\r\n\r\n def init_level(self):\r\n self.blocks.add(Block(self.canvas, \"img\\\\leftwall.png\", 0, 20, \"wall\"))\r\n self.blocks.add(Block(self.canvas, \"img\\\\leftwall.png\", 700, 20, \"wall\"))\r\n self.blocks.add(Block(self.canvas, \"img\\\\bigplatform.png\", 0, 0, \"platform\"))\r\n self.blocks.add(Block(self.canvas, \"img\\\\bigplatform.png\", 400, 
0, \"platform\"))\r\n self.blocks.add(Block(self.canvas, \"img\\\\bigplatform.png\", 0, 460, \"platform\"))\r\n self.blocks.add(Block(self.canvas, \"img\\\\bigplatform.png\", 400, 460, \"platform\"))\r\n self.blocks.add(Block(self.canvas, \"img\\\\bigplatform.png\", 200, 130, \"platform\"))\r\n self.blocks.add(Block(self.canvas, \"img\\\\bigplatform.png\", 200, 350, \"platform\"))\r\n self.blocks.add(Block(self.canvas, \"img\\\\smallplatform.png\", 20, 240, \"platform\"))\r\n self.blocks.add(Block(self.canvas, \"img\\\\smallplatform.png\", 540, 240, \"platform\"))\r\n self.crate_spawn_poss = [(255, 115), (465, 115), (100, 225), (620, 225), (255, 335), (465, 335), (100, 445), (620, 445)]\r\n\r\n def init_weapons(self):\r\n #bullet sprite, reload time, bullet speed, bullets per shot, spread, damage, lifetime, knockback, recoil\r\n self.weapons[\"pistol\"] = Weapon(\"img\\\\smallbullet.png\", 200, 7, 1, 0, 10, 10000, 0, 0, self.blocks, self.player, self.projectiles)\r\n self.weapons[\"shotgun\"] = Weapon(\"img\\\\smallbullet.png\", 1000, 7, 8, 1, 5, 10000, 1, 10, self.blocks, self.player, self.projectiles)\r\n self.weapons[\"machinegun\"] = Weapon(\"img\\\\smallbullet.png\", 50, 7, 1, 0.1, 3, 10000, 1, 10, self.blocks, self.player, self.projectiles)\r\n\r\n def do_text(self, text, size, text_color, x, y):\r\n font = pg.font.Font(self.font_name, size)\r\n text_surface = font.render(text, True, text_color)\r\n text_rect = text_surface.get_rect()\r\n text_rect.center = (x, y)\r\n self.canvas.blit(text_surface, text_rect)\r\n\r\ngame = Game()\r\nwhile game.running:\r\n game.new()\r\n\r\npg.quit()\r\n","repo_name":"dmarch0/python-game-projects","sub_path":"super crate box pg/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":4952,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"27980571127","text":"#!/usr/bin/python3\n\"\"\" 1-fifo_cache.py \"\"\"\nfrom base_caching import BaseCaching\n\n\nclass FIFOCache(BaseCaching):\n \"\"\" inherits from BaseCaching and is a caching system\n \"\"\"\n def __init__(self):\n super().__init__()\n self.data = {}\n self.next_in, self.next_out = 0, 0\n\n def _pop(self):\n \"\"\" pops out of the list\n \"\"\"\n self.next_out += 1\n key = self.data[self.next_out]\n del self.data[self.next_out], self.cache_data[key]\n\n def _push(self, key, item):\n \"\"\" appends to a list\n \"\"\"\n if len(self.cache_data) > BaseCaching.MAX_ITEMS - 1:\n print(\"DISCARD: {}\".format(self.data[self.next_out + 1]))\n self._pop()\n self.cache_data[key] = item\n self.next_in += 1\n self.data[self.next_in] = key\n\n def put(self, key, item):\n \"\"\" Add an item in the cache\n \"\"\"\n if key and item:\n if key in self.cache_data:\n self.cache_data[key] = item\n else:\n self._push(key, item)\n\n def get(self, key):\n \"\"\" Get an item by key\n \"\"\"\n if key is None or self.cache_data.get(key) is None:\n return None\n if key in self.cache_data:\n val = self.cache_data[key]\n return val\n","repo_name":"gardenia-homsi/holbertonschool-python","sub_path":"0x1A-caching/1-fifo_cache.py","file_name":"1-fifo_cache.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"14564357689","text":"from A_Test import A_Test\nimport time\nimport numpy as np\n\nclass PN_Test(A_Test):\n def __init__(self):\n super(PN_Test, self).__init__()\n\n def run(self):\n\n self.stop_flg = False\n\n # ----- парсим параметры\n\n #return\n\n\n\n\n 
self._span = self.user[0]\n self.rbw = self.user[1]\n self.arr = self.user[2]\n\n # ----- write to the log\n\n if self.logfile:\n self.name = self.logfile\n self.logfile = open(self.logfile + '.txt', 'w')\n\n self.writeToLog('Date: ' + time.ctime(time.time()))\n self.writeToLog('Generator: ' + self.gen.fullName)\n self.writeToLog('Analyzer: ' + self.an.fullName)\n self.writeToLog('RBW: ' + str(self.rbw))\n self.writeToLog('Тип устройства: ' + self.dev.type)\n self.writeToLog('Использование калибровки: ' + str(self.cal_flg))\n self.writeToLog('Амплитуда по входу: ' + str(self.level))\n self.writeToLog('_type: ' + 'PN')\n\n if self.dev.type == 'Panorama':\n self.writeToLog('Аттенюатор: ' + str(self.dev.getAtt()))\n self.writeToLog('Источник опорной частоты: ' + self.dev.getRef())\n self.writeToLog('МШУ: ' + str(self.dev.getLNA()))\n\n self.writeToLog('\\n')\n self.writeToLog('PN')\n self.writeToLog('\\n')\n\n # ----- initialize the Analyzer\n\n self.an.reset()\n self.an.setFreqSpan(10)\n if self.dev.type == 'Panorama':\n self.an.setFreqCent(750)\n else:\n pass\n\n if self.dev.type == 'Panorama':\n if self.dev.getLNA():\n self.an.setRefLvl(self.level + 45, 'dBm')\n else:\n self.an.setRefLvl(self.level + 30, 'dBm')\n else:\n self.an.setRefLvl(self.level + 5)\n\n self.an.enablePhaseNoise()\n\n self.an.setTracAver(True)\n self.an.singleSweepMode()\n self.an.setSweep(2.5)\n\n #self.an.setBandwidth(self.rbw)\n\n # ----- initialize the Generator\n self.gen.reset()\n self.gen.setLevel(self.level - self.cal_in[round(self.beg)], 'dBm')\n self.gen.setFreq(self.beg)\n self.gen.RFOutON()\n # -----\n\n self.x = []\n self.y = []\n\n time.sleep(0.5)\n\n try:\n for i in range(self.beg, self.end + 1, self.step):\n\n if self.stop_flg:\n raise Warning\n\n self.dev.setFreqReboot(i)\n\n self.gen.setLevel(self.level - self.cal_in[round(i)], 'dBm')\n self.gen.setFreq(i)\n\n time.sleep(0.03)\n\n self.an.setFreqSpan(10, 'kHz') # so the center frequency does not drift\n self.an.setBandwidth(300, 'Hz')\n self.an.averBeginMeas(5)\n self.an.waitEndCmd()\n self.an.markerOneSetMax()\n self.an.setCenterOnMarker()\n\n self.an.waitEndCmd()\n\n time.sleep(0.03)\n\n sch = -1\n for _sp in self._span:\n sch = sch + 1\n\n\n\n self.an.setFreqSpan(_sp.span[0], _sp.span[1])\n self.an.setBandwidth(_sp.rbw[0], _sp.rbw[1])\n\n\n time.sleep(0.03)\n\n self.an.averBeginMeas(5)\n self.an.waitEndCmd()\n self.an.PhaseNoisePeakSearch()\n self.an.setDeltaMarker2(_sp.mkr[0], _sp.mkr[1])\n self.an.waitEndCmd()\n\n val = round(float(self.an.getPhaseNoise()), 2)\n\n self.arr[sch].x.append(i)\n self.arr[sch].y[0].append(val)\n\n self.mpl_plot.emit(self.arr, True, 'Фазовые шумы')\n self.progress_signal.emit(i)\n\n except:\n raise\n finally:\n\n self.progress_signal.emit(self.end)\n\n try:\n self.gen.RFOutOFF()\n except:\n pass\n\n try: # write the data to the log\n\n self.logfile.write('\\n')\n self.logfile.write('\\n')\n\n for i in range(len(self.arr[0].x)):\n self.logfile.write(str(self.arr[0].x[i]) + ' ')\n for j in range(len(self.arr)): # iterate over the plots\n self.logfile.write(str(self.arr[j].y[0][i]) + ' ')\n self.logfile.write('\\n')\n\n\n\n \"\"\"\"\n for plt in self.arr:\n self.writeToLog('\\n')\n self.writeToLog(plt.title)\n\n for i in range(len(plt.x)):\n self.writeToLog(str(plt.x[i]) + ' ' + str(plt.y[0][i]))\n \"\"\"\n\n except:\n raise\n\n\n try:\n self.logfile.close()\n except:\n 
pass\n\n\n","repo_name":"lolizz00/__RF_Test_Tool","sub_path":"Python/PN_test.py","file_name":"PN_test.py","file_ext":"py","file_size_in_byte":5166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27064784843","text":"import requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\ndef get_soup(url):\n try:\n page=requests.get(url)\n if page.status_code == 200:\n print('Connected to the server Successfully')\n return BeautifulSoup(page.text,'html.parser')\n else:\n print('Failed',page.status_code)\n except Exception as e:\n print(e)\n\ndef extract_data(page_soup):\n\n container = page_soup.find_all(\"div\",attrs={\"class\":\"product-desc-rating\"})\n #len(container)\n\n\n mylist = []\n for item in container:\n \n title = item.find(\"p\",attrs={\"class\":\"product-title\"}).text#.find(\"span\")\n\n #print(title)\n original_price = item.find(\"div\",attrs={\"class\":\"lfloat marR10\"}).find(\"span\",attrs={\"class\":\"lfloat product-desc-price strike \"}).text\n\n #print(original_price)\n discounted_price= item.find(\"div\",attrs={\"class\":\"lfloat marR10\"}).find(\"span\",attrs={\"class\":\"lfloat product-price\"}).text\n #print(discounted_price)\n try:\n bachat = item.find(\"div\",attrs={\"class\":\"product-discount\"}).find('span').text.split()\n #print(bachat)\n except Exception as e:\n #print(bachat,e)\n #pass\n bachat=0\n #else:\n # bachat=0\n #Get_rattings = item.find(\"div\",attrs={\"class\":\"rating-stars \"}).text\n #print(Get_rattings)\"\"\"\n mylist.append({\n 'title':title,\n \"original_price\":original_price,\n \"discounted_price\":discounted_price,\n 'bachat':bachat,\n\n })\n return mylist\n\ndef get_section(last_section):\n # advance the result offset by one page (20 items, matching the /0/{section}/20 URL segment)\n return last_section + 20\n\n\ndef save_csv(datadict,path):\n data=pd.DataFrame(datadict)\n data.to_csv(path)\n return data\n\ndef save_sql(datadict, db):\n data=pd.DataFrame(datadict)\n data.to_sql('snapdeal',db.engine,index=False)\n return data\n\n\n\nif __name__ == \"__main__\":\n query = 'saree'\n section=0\n url=f\"https://www.snapdeal.com/acors/json/product/get/search/0/{section}/20?q=&sort=rlvncy&brandPageUrl=&keyword={query}&searchState=k3=true|k5=0|k6=0|k7=/yeUxAAIQAAAAAAAAAAAAAAAAAAAAABA|k8=0&pincode=&vc=&webpageName=searchResult&campaignId=&brandName=&isMC=false&clickSrc=go_header&showAds=true&cartId=&page=srp\"\n \n start_page = 0\n product_list = []\n while True:\n #print('url',url)\n soup = get_soup(url)\n itemlist = extract_data(soup)\n if itemlist:\n product_list.extend(itemlist)\n #print('total items',len(product_list))\n section = get_section(section)\n if section>1:\n url = next_section =f\"https://www.snapdeal.com/acors/json/product/get/search/0/{section}/20?q=&sort=rlvncy&brandPageUrl=&keyword={query}&searchState=k3=true|k5=0|k6=0|k7=/yeUxAAIQAAAAAAAAAAAAAAAAAAAAABA|k8=0&pincode=&vc=&webpageName=searchResult&campaignId=&brandName=&isMC=false&clickSrc=go_header&showAds=true&cartId=&page=srp\"\n \n start_page +=1\n else:\n print('finished')\n break\n if len(product_list) >=100:\n break\n save_csv(product_list,'productdeatail.csv')","repo_name":"ashi9496/bachatapp","sub_path":"snapdeal.py","file_name":"snapdeal.py","file_ext":"py","file_size_in_byte":3177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41297085416","text":"import boto3\nimport json\nimport os\nimport uuid\n\nfrom config import *\nfrom response import *\nfrom error_messages import *\nfrom utils import 
convert_current_date_to_iso8601, aws_get_identity_id, get_num_prj\nimport const\n\n\nfrom lambda_base_class import LambdaBaseClass\nfrom boto3.dynamodb.conditions import Key, Attr\nfrom models.annotaition.anno_data_model import AnnoDataModel\nfrom models.annotaition.anno_label_info_model import AnnoLabelInfoModel\n\n\nclass GetFileInfoClass(LambdaBaseClass):\n\n def __init__(self) -> None: \n super().__init__() \n self.model_data = AnnoDataModel(self.env.TABLE_ANNO_DATA_ORI)\n self.model_label_info = AnnoLabelInfoModel(self.env.TABLE_ANNO_LABEL_INFO)\n\n @LambdaBaseClass.parse_body\n def parser(self, body):\n self.logger.debug(f\"body in main_parser: {body}\")\n\n self.id_token = body[KEY_NAME_ID_TOKEN] \n self.project_id = body[\"project_id\"]\n self.file_name = body[\"filename\"]\n self.category_id = body.get(\"category_id\", \"\")\n\n def _check_input_value(self): \n return \n\n def handle(self, event, context):\n \n ### parse body\n self.parser(event)\n\n ### check identity\n identity_id = self.get_identity(self.id_token, self.env.USER_POOL_ID, self.env.IDENTITY_POOL_ID)\n\n ### get file info including link to s3 of segmentation prelabel\n file_info = self.model_data.get_item(self.project_id, self.file_name)\n file_id = file_info[AnnoDataModel.FIELD_FILE_ID]\n\n ### get label json info: label with category and json label\n if len(self.category_id)==0:\n label_info = self.model_label_info.query_all_category_label(file_id)\n else:\n label_info = self.model_label_info.get_label_info_of_category(file_id, self.category_id)\n \n return generate_response(\n message=\"OK\",\n status_code=HTTPStatus.OK,\n data={\n \"file_info\": file_info,\n \"label_info\": label_info\n },\n )\n\n@error_response\ndef lambda_handler(event, context):\n\n return GetFileInfoClass().handle(event, context)","repo_name":"daita-technologies/backend","sub_path":"annotation-app/annotation-service/api-handler-functions/get-file-info-n-label/hdler_get_file_info_n_label.py","file_name":"hdler_get_file_info_n_label.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"40813592885","text":"# Program to find digital sum of a given Number\r\n# example: n=123 Digital sum----->1+2+3=6\r\n\r\n# By - Mayank Singh\r\n# Date of Creation - 22-10-2021\r\n# Last Modified - 25-10-2021\r\n\r\n# initialization\r\nsum = 0\r\n\r\nnum = int(input(\"Enter a number: \"))\r\n\r\nwhile num>0:\r\n temp = num%10\r\n sum += temp\r\n num = int(num/10)\r\n\r\nprint(\"Sum of all the digits of the number is: \", sum)","repo_name":"Mayankheropc/Python","sub_path":"18 - Program to find digital sum of a given Number.py","file_name":"18 - Program to find digital sum of a given Number.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32875736289","text":"from django.urls import path\nfrom . 
import views\n\napp_name = 'maket'\n\nurlpatterns = [\n path('maket_base', views.maket_base, name='maket_base'),\n path('maket/maket', views.maket, name='maket'),\n\n path('maket_status///', views.maket_status, name='maket_status'),\n path('save_to_film//', views.save_to_film, name='save_to_film'),\n path('upload_maket/', views.upload_maket, name='maket_order'),\n path('download_maket/', views.download_maket, name='download_maket'),\n path('look_up/', views.look_up, name='look_up'),\n path('delete_maket', views.delete_maket, name='delete_maket'),\n path('maket_check_status/', views.maket_check_status, name='maket_check_status'),\n path('look_up_not_finished/', views.look_up_not_finished, name='look_up_not_finished'),\n]","repo_name":"tsibul/Makety","sub_path":"maket/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"35094876063","text":"import sys\nfrom functools import reduce\n\nN = int(sys.stdin.readline())\n\nfirst_sen ='어느 한 컴퓨터공학과 학생이 유명한 교수님을 찾아가 물었다.'\n\nanswer = '\"재귀함수가 뭔가요?\"'\n\nsentence_1_a = '\"잘 들어보게. 옛날옛날 한 산 꼭대기에 이세상 모든 지식을 통달한 선인이 있었어.'\nsentence_1_b = '마을 사람들은 모두 그 선인에게 수많은 질문을 했고, 모두 지혜롭게 대답해 주었지.'\nsentence_1_c = '그의 답은 대부분 옳았다고 하네. 그런데 어느 날, 그 선인에게 한 선비가 찾아와서 물었어.\"'\n\nsentence_2 = '\"재귀함수는 자기 자신을 호출하는 함수라네\"'\n\nsentence_3 = '라고 답변하였지.'\n\nprint(first_sen)\nfor i in range(N+1):\n print(i*'____'+answer)\n if i < N :\n print(i*'____'+sentence_1_a)\n print(i*'____'+sentence_1_b)\n print(i*'____'+sentence_1_c)\n else:\n print(i*'____'+sentence_2)\n for j in range(N,-1,-1):\n print(j*'____'+sentence_3)\n\n\n\n","repo_name":"meohyun/baekjoon","sub_path":"17478.py","file_name":"17478.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22823795400","text":"from npc.Npc import Npc\nimport pygame\nfrom artifacts.Artifact import Artifact\nimport os\nfrom pathlib import Path\nfrom artifacts.AttackClass import AttackClass\n\ncurrent = os.path.dirname(os.path.realpath(__file__))\npath = Path(__file__).resolve().parent.parent.parent\n\n\n# Class for a npc of type Orc, inherits from Npc class inheriting from Character class\nclass Orc(Npc):\n def __init__(self, name, side, mana, life, images, artifacts, quests, x, y, pos, groups,\n collision_sprites):\n super().__init__(name, side, mana, life, images, artifacts, quests, pos, groups, collision_sprites)\n self.rect.x = x\n self.rect.y = y\n self.race = \"Orc\"\n self.collision_sprites = collision_sprites\n self.can_talk = False\n blood_image = pygame.image.load(os.path.join(path, \"resources/graphics/artifacts\", \"orc_blood.PNG\")).convert_alpha()\n self.blood = Artifact(blood_image, 10, 'Orc Blood', None)\n mace_image = pygame.image.load(os.path.join(path, \"resources/graphics/artifacts\", \"orc_mace.PNG\")).convert_alpha()\n self.mace = Artifact(mace_image, 10, 'Orc Mace', None)\n self.artifacts.add(self.blood, self.mace)\n potion_image = pygame.image.load(os.path.join(path, \"resources/graphics/artifacts\", \"magic_potion.PNG\")).convert_alpha()\n self.potion = Artifact(potion_image, 10, 'Life Potion', None)\n self.gifts.add(self.potion)\n mud_image = pygame.image.load(os.path.join(path, \"resources/graphics/particles\", \"mud.PNG\")).convert_alpha()\n self.npc_attack = AttackClass(mud_image, 20, 10, 'orc attack')\n self.context = 
Path(\"../NLP/context/OrcContext.txt\").read_text()\n\n","repo_name":"Rudaq/MagicalWorld","sub_path":"game/npc/Orc.py","file_name":"Orc.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"18875873455","text":"from django.shortcuts import render, HttpResponse, redirect\n\nfrom django.conf import settings\n#from django.utils.safestring import mark_safe\n\nimport random\n\n#REST-APIS\nimport io\nfrom knox.views import LoginView as KnoxLoginView, APIView\n\nfrom numpy import void\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.parsers import JSONParser\nfrom rest_framework.permissions import IsAdminUser, AllowAny, IsAuthenticated\nfrom rest_framework.authentication import TokenAuthentication\n\nimport datetime\nfrom datetime import datetime, date\n\ntoday = date.today()\ntoday_now = datetime.now()\n\n#DATABEASES & FUNCTIONS\n#from django.contrib.auth.models import User\nfrom .models import V1_UserOtp as UserOtp\nfrom apimaster.models import *\nfrom .sitefunction import *\nfrom . filter import *\n\n#SERIALIZERS\nfrom .serializers import *\n\nfrom apimaster.serializers import * \n\n\nclass UserLoginOtp(KnoxLoginView):\n permission_classes = [AllowAny]\n\n def post(self, request, format=None):\n \n json_data = JSONRenderer().render(request.data)\n stream = io.BytesIO(json_data)\n request_data = JSONParser().parse(stream)\n data = {'response':'error', 'status':400}\n tag = request.GET.get('tag','')\n\n try:\n user = (get_or_create_user(request_data['username']))\n except:\n data['msg'] = 'Username Is Not Found'\n json_data = JSONRenderer().render(data)\n return HttpResponse(json_data, content_type='application/json')\n\n if tag == 'set-otp':\n data = setOtp(user)\n else:\n try:\n data = login_with_otp(user, request_data['otp'], request)\n except:\n data['msg'] = 'Otp Is Not Found'\n\n\n # else:\n # print(get_ip_or_location(request)) \n\n json_data = JSONRenderer().render(data)\n return HttpResponse(json_data, content_type='application/json')\n\nclass UserProfileCRUD(APIView):\n authentication_classes = [TokenAuthentication]\n permission_classes = [IsAuthenticated]\n\n def get(self, request, format=None):\n data = {'status':404, 'response':'error', 'msg':'Failed To Create Or Get User'}\n x_profile = get_or_create_userprofile(request)\n if (x_profile == 404):\n json_data = JSONRenderer().render(data)\n return HttpResponse(json_data, content_type='application/json') \n \n profile = UserProfiles.objects.filter(id=x_profile.id)\n srz = UserProfilesSerializer(profile, many=True)\n data['data'] = {\"user\":x_profile.json_display,\"profile\":srz.data}\n data['response'] = 'success'\n data['status'] = 200\n json_data = JSONRenderer().render(data)\n return HttpResponse(json_data, content_type='application/json') \n\n def post(self, request, format=None):\n data = {'response':'error', 'status':400, 'msg':'Api Response'}\n\n attach_file = request.FILES.get('attach_file')\n \n if attach_file:\n request.data['attach_file'] = ''\n\n json_data = JSONRenderer().render(request.data)\n stream = io.BytesIO(json_data)\n request_data = JSONParser().parse(stream)\n\n if attach_file:\n request_data['photo'] = attach_file\n\n update_profile = UserProfiles.objects.get(user=request.user)\n form = UserProfilesSerializer(update_profile, data=request_data)\n\n\n # try:\n # form.is_valid(raise_exception=True)\n # print(\"ok.....\",form.is_valid())\n # except Exception as e:\n # print(e) \n\n try:\n 
user_profile = UserProfiles.objects.get(user=request.user)\n except:\n user_profile = UserProfiles(user=request.user) \n user_profile.save()\n\n try:\n if user_profile.user.first_name != request_data['name']:\n user_profile.user.first_name = request_data['name']\n \n except:\n pass \n\n try:\n if user_profile.user.email != request_data['email']:\n user_profile.user.email = request_data['email']\n \n except:\n pass\n\n user_profile.user.save() \n\n \n\n if form.is_valid():\n try:\n form.save()\n data['response'] = 'success'\n data['status'] = 200\n data['msg'] = \"Successfully Update...\"\n except Exception as e:\n data['msg'] = f\"{e}\"\n \n else:\n data['msg'] = f\"Form Not Valid\"\n \n\n profile = UserProfiles.objects.filter(user=request.user)\n srz = UserProfileSrz(profile, many=True)\n data['data'] = {\"user\":update_profile.json_display,\"profile\":srz.data}\n \n\n #PARTIAL UPDATE CUSTOM FUNCTIOn TO API\n # update_profile = get_or_update_userprofile(request, request_data)\n # profile = UserProfiles.objects.filter(id=update_profile['profile'].id)\n # srz = UserProfileSrz(profile, many=True)\n # data['data'] = {\"user\":update_profile['profile'].json_display,\"profile\":srz.data}\n # data['response'] = update_profile['response']\n # data['status'] = update_profile['status']\n # data['msg'] = update_profile['msg']\n\n json_data = JSONRenderer().render(data)\n return HttpResponse(json_data, content_type='application/json')\n\n\n#Filter Doctor\nclass DoctorsApiView(APIView):\n #authentication_classes = [TokenAuthentication]\n permission_classes = [AllowAny]\n\n def get(self, request, format=None):\n filter_data = FilterDoctorProfile(request)\n filter_data['data'] = UserProfilesSerializer(filter_data['data'], many=True).data\n\n filter_data['info'] = {'test':'oky'}\n\n json_data = JSONRenderer().render(filter_data)\n return HttpResponse(json_data, content_type='application/json')\n\n def post(self, request, format=None):\n data = {}\n\n json_data = JSONRenderer().render(data)\n return HttpResponse(json_data, content_type='application/json') \n\n def put(self, request, format=None):\n data = {}\n\n json_data = JSONRenderer().render(data)\n return HttpResponse(json_data, content_type='application/json')\n\n def delete(self, request, format=None):\n data = {}\n\n json_data = JSONRenderer().render(data)\n return HttpResponse(json_data, content_type='application/json') \n\nclass DoctorsProfileApi(APIView):\n permission_classes = [AllowAny]\n\n def get(self, request, id):\n \n try:\n filter_data = UserProfiles.objects.filter(id=int(id), occupation='doctor')\n user_data = UserProfiles.objects.get(id=int(id), occupation='doctor')\n except:\n filter_data = None\n json_data = JSONRenderer().render({\"status\":400, \"message\":\"Id Is Not Found!\", \"data\":{}})\n return HttpResponse(json_data, content_type='application/json')\n\n doctor = UserProfilesSerializer(filter_data, many=True).data \n\n clinic = UsersClinics.objects.filter(user=user_data.user)\n clinic_srz = UsersClinicSerializer(clinic, many=True).data\n\n council = UsersRegistrationCouncils.objects.filter(user=user_data.user)\n council_srz = DoctorCouncilSerializer(council, many=True).data\n\n\n education = UsersEducations.objects.filter(user=user_data.user)\n education_srz = UsersEducationSerializer(education, many=True).data\n\n identityProof = UsersIdentityProofs.objects.filter(user=user_data.user)\n id_srz = UsersIdentityProofSerializer(identityProof, many=True).data\n\n registration = 
MedicalRegistrationProofs.objects.filter(user=user_data.user)\n registration_srz = MedicalRegistrationProofSerializer(registration, many=True).data\n\n establish = EstablishmentProofs.objects.filter(user=user_data.user)\n establish_srz = EstablishmentProofsSerializer(establish, many=True).data\n\n mapLocation = MapLocations.objects.filter(user_id=user_data.id)\n map_srz = MapLocationsSerializer(mapLocation, many=True).data\n\n consultationFee = ConsultationFees.objects.filter(user_id=user_data.id) \n\n consult_srz = ConsultationFeesSerializer(consultationFee, many=True).data\n\n data = {\n 'data': {\n \"doctor\":doctor,\n \"user\":user_data.json_display,\n\n \"treatmentdata\": {\n \"id\": 1,\n \"name\": \"General Medicine\",\n \"description\": \"General Medicine\",\n \"primary_image\": \"/media/treatments/livercontent_hEXgOAC.png\",\n },\n\n\n 'hosiptal': clinic_srz,\n\n\n 'expertise': {\n 'id':1,\n 'name':'General Physician',\n },\n 'reviews': [\n {\n 'id':1,\n 'review':'good service',\n 'rate':4.1,\n 'appointment_id':44,\n 'doctor_id':44,\n 'user_id':1,\n 'created_at':\"2021-06-05 11:06:10\",\n 'updated_at':\"2021-06-05 11:06:10\",\n 'user': {\n 'name':'Anshu g',\n 'image':\"/media/profile_photo/user.png\",\n 'fullImage':\"/media/profile_photo/user.png\",\n },\n },\n {\n 'id':2,\n 'review':'good service',\n 'rate':4.1,\n 'appointment_id':44,\n 'doctor_id':44,\n 'user_id':1,\n 'created_at':\"2021-06-05 11:06:10\",\n 'updated_at':\"2021-06-05 11:06:10\",\n 'user': {\n 'name':'Anshu g',\n 'image':\"/media/profile_photo/user.png\",\n 'fullImage':\"/media/profile_photo/user.png\",\n },\n },\n {\n 'id':3,\n 'review':'good service',\n 'rate':4.1,\n 'appointment_id':44,\n 'doctor_id':44,\n 'user_id':1,\n 'created_at':\"2021-06-05 11:06:10\",\n 'updated_at':\"2021-06-05 11:06:10\",\n 'user': {\n 'name':'Anshu g',\n 'image':\"/media/profile_photo/user.png\",\n 'fullImage':\"/media/profile_photo/user.png\",\n },\n },\n ],\n \n 'council_detail': council_srz,\n \n 'education_detail': education_srz,\n \n 'clinic_detail': clinic_srz,\n \n 'identity_proof_detail': id_srz,\n\n 'registration_detail': registration_srz,\n 'establishment_detail': establish_srz,\n 'maplocations_detail': map_srz,\n\n 'consultationfee_detail': consult_srz,\n },\n \n \"recommended_doctors\": [\n\n {\n \"id\": 1,\n \"name\": \"dash kamal\",\n \"mobile\": \"9846584616\",\n \"email\": \"dashkamal@mailinator.com\",\n \"photo\": \"/media/profile_photo/image_Wn6BFli.png\",\n \"sex\": \"male\",\n \"dob\": \"2021-11-10\",\n \"occupation\": \"doctor\",\n \"about\": \"\",\n \"work_experience\": None,\n \"description\": \"update base64 image 2021-12-29 15:58:18\",\n \"specialties\": None,\n \"specialty_id\": 1,\n \"language\": None,\n \"blood_group\": \"\",\n \"locality\": \"\",\n \"address\": \"noida\",\n \"address2\": \"\",\n \"city\": \"\",\n \"state\": \"\",\n \"country\": \"\",\n \"pincode\": None,\n \"latitude_coordinate\": \"\",\n \"longitude_coordinate\": \"\",\n \"verification\": 1,\n \"verification_text\": \"Aprooved\",\n \"created_by\": None,\n \"created_at\": \"2021-11-29T17:49:31.472602+05:30\",\n \"status\": True,\n \"treatmentdata\": [\n {\n \"id\": 1,\n \"name\": \"General Medicine\",\n \"description\": \"General Medicine\",\n \"primary_image\": \"/media/treatments/livercontent_hEXgOAC.png\"\n }\n ]\n },\n {\n \"id\": 19,\n \"name\": \"yogi4\",\n \"mobile\": None,\n \"email\": \"yogi4@mailinator.com\",\n \"photo\": None,\n \"sex\": \"male\",\n \"dob\": None,\n \"occupation\": \"doctor\",\n \"about\": None,\n 
\"work_experience\": None,\n \"description\": None,\n \"specialties\": None,\n \"specialty_id\": 1,\n \"language\": None,\n \"blood_group\": None,\n \"locality\": None,\n \"address\": None,\n \"address2\": None,\n \"city\": None,\n \"state\": None,\n \"country\": None,\n \"pincode\": None,\n \"latitude_coordinate\": None,\n \"longitude_coordinate\": None,\n \"verification\": 1,\n \"verification_text\": \"Aprooved\",\n \"created_by\": None,\n \"created_at\": \"2021-12-12T20:53:42.382056+05:30\",\n \"status\": True,\n \"treatmentdata\": [\n {\n \"id\": 2,\n \"name\": \"General Surgery\",\n \"description\": \"General Surgery\",\n \"primary_image\": \"/media/treatments/WomanHealth.jpg\"\n }\n ]\n },\n {\n \"id\": 20,\n \"name\": \"yogi to 102\",\n \"mobile\": None,\n \"email\": \"yogi6@mailinator.com\",\n \"photo\": \"/media/profile_photo/20723-2-mario-image_pcRTCQ0.png\",\n \"sex\": \"male\",\n \"dob\": None,\n \"occupation\": \"doctor\",\n \"about\": None,\n \"work_experience\": None,\n \"description\": \"demo description\",\n \"specialties\": None,\n \"specialty_id\": 1,\n \"language\": None,\n \"blood_group\": None,\n \"locality\": None,\n \"address\": None,\n \"address2\": None,\n \"city\": None,\n \"state\": None,\n \"country\": None,\n \"pincode\": None,\n \"latitude_coordinate\": None,\n \"longitude_coordinate\": None,\n \"verification\": 2,\n \"verification_text\": \"Rejected\",\n \"created_by\": None,\n \"created_at\": \"2021-12-12T20:57:44.296325+05:30\",\n \"status\": False,\n \"treatmentdata\": [\n {\n \"id\": 1,\n \"name\": \"General Medicine\",\n \"description\": \"General Medicine\",\n \"primary_image\": \"/media/treatments/livercontent_hEXgOAC.png\"\n },\n {\n \"id\": 2,\n \"name\": \"General Surgery\",\n \"description\": \"General Surgery\",\n \"primary_image\": \"/media/treatments/WomanHealth.jpg\"\n },\n {\n \"id\": 3,\n \"name\": \"Psychiatry\",\n \"description\": None,\n \"primary_image\": None\n },\n {\n \"id\": 4,\n \"name\": \"General Physician\",\n \"description\": None,\n \"primary_image\": None\n }\n ]\n },\n {\n \"id\": 31,\n \"name\": \"yogi to 121\",\n \"mobile\": None,\n \"email\": None,\n \"photo\": \"/media/profile_photo/image_lKCsLDD.png\",\n \"sex\": \"male\",\n \"dob\": None,\n \"occupation\": \"doctor\",\n \"about\": None,\n \"work_experience\": None,\n \"description\": \"update base64 image 2022-01-21 12:19:08\",\n \"specialties\": None,\n \"specialty_id\": None,\n \"language\": \"['ab', 'en']\",\n \"blood_group\": None,\n \"locality\": None,\n \"address\": None,\n \"address2\": None,\n \"city\": None,\n \"state\": None,\n \"country\": None,\n \"pincode\": None,\n \"latitude_coordinate\": None,\n \"longitude_coordinate\": None,\n \"verification\": 0,\n \"verification_text\": None,\n \"created_by\": None,\n \"created_at\": \"2021-12-29T22:30:58.906327+05:30\",\n \"status\": False,\n \"treatmentdata\": [\n {\n \"id\": 1,\n \"name\": \"General Medicine\",\n \"description\": \"General Medicine\",\n \"primary_image\": \"/media/treatments/livercontent_hEXgOAC.png\"\n },\n {\n \"id\": 2,\n \"name\": \"General Surgery\",\n \"description\": \"General Surgery\",\n \"primary_image\": \"/media/treatments/WomanHealth.jpg\"\n },\n {\n \"id\": 3,\n \"name\": \"Psychiatry\",\n \"description\": None,\n \"primary_image\": None\n },\n {\n \"id\": 4,\n \"name\": \"General Physician\",\n \"description\": None,\n \"primary_image\": None\n }\n ]\n }\n \n ],\n 'message': 'Details successfully get',\n 'status' : 200\n }\n\n json_data = JSONRenderer().render(data)\n return 
HttpResponse(json_data, content_type='application/json')\n\n\n# class AppoimentApiView(APIView):\n# authentication_classes = [TokenAuthentication]\n# permission_classes = [IsAuthenticated]\n\n# def get(self, request, format=None):\n# data = {}\n\n# json_data = JSONRenderer().render(data)\n# return HttpResponse(json_data, content_type='application/json')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nclass CheckApi(KnoxLoginView):\n authentication_classes = [TokenAuthentication]\n permission_classes = [IsAuthenticated]\n def get(self, request, format=None):\n res = {}\n x_profile = get_or_create_userprofile(request)\n profile = UserProfiles.objects.filter(id=x_profile.id)\n srz = UserProfileSrz(profile, many=True)\n res['data'] = {\"user\":x_profile.json_display,\"profile\":srz.data}\n res['response'] = 'success'\n res['status'] = 200\n json_data = JSONRenderer().render(res)\n return HttpResponse(json_data, content_type='application/json')\n\n","repo_name":"romsha28/hospital_python","sub_path":"v1/patientapi/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":22075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4421500088","text":"import helper\n\nclass Node(object):\n def __init__(self, board, curPlayerColor, depth, MIN_OR_MAX, prevMove):\n # These variables remain unchanged after initialization.\n self.board = board\n self.curPlayerColor = curPlayerColor\n self.depth = depth\n self.MIN_OR_MAX = MIN_OR_MAX\n self.prevMove = prevMove\n\n # These variables will be updated as the minimax tree is built.\n self.childChoice = None\n self.children = []\n self.value = 0\n\ndef buildTree(agent, node, alpha=float(\"-inf\"), beta=float(\"inf\")):\n # Increment the agent's expanded Node counter\n\tagent.expandedNodes += 1\n\n # Before building the children, the current board layout is\n # checked for a win (a chain of 5 stones) for either player or a draw.\n\tboardState = node.board.winOrDraw()\n\trootColor = agent.player\n\n # If the current board layout is a win or a draw, no subtree should be built.\n # Instead, the appropriate value should be assigned to the parameter Node.\n\tif boardState is not None:\n # If the current board layout is a draw, the parameter Node's value is 0.\n\t\tif boardState == \"DRAW\":\n\t\t\tnode.value = 0\n\t\t\treturn\n\t\telif boardState == \"BLUE\" or boardState == \"RED\":\n # If the color of this tree's root Node matches the color of the\n # chain of 5 stones, the parameter Node's value should be set to\n # positive infinity to represent a win for the original player.\n # Otherwise, it should be set to negative infinity to represent a\n # loss for the original player.\n\t\t\tif rootColor == boardState:\n\t\t\t\tnode.value = float(\"inf\")\n\t\t\t\treturn\n\t\t\telse:\n\t\t\t\tnode.value = float(\"-inf\")\n\t\t\t\treturn\n\t\telse:\n\t\t\traise ValueError(\"The winOrDraw function isn't returning a correct value.\")\n\n # If this is level 3, call the evaluation function to set the\n # parameter Node's value and return.\n\tif node.depth >= 3:\n\t\tpatterns = node.board.getPatterns()\n\t\tblocks = helper.findBlocks(node.board)\n\t\tnode.value = helper.evalLayout(rootColor, patterns, blocks)\n\t\treturn\n\n # Initialize variables for determining whether or not the color of the next\n # stone to be placed is red, the color of the next player, whether the\n # children of the parameter Node are MIN or MAX nodes, storing the depth of the next\n # layer in the game tree, and storing a parent pointer to the 
parameter Node\n\tisSettingRed = False\n\tnextPlayerColor = None\n\tnextDepth = node.depth + 1\n\tnextMIN_OR_MAX = None\n\n # Determine whether or not the next stone to be placed is red\n # and what the color of the next player is\n\tif node.curPlayerColor == \"RED\":\n\t\tisSettingRed = True\n\t\tnextPlayerColor = \"BLUE\"\n\telif node.curPlayerColor == \"BLUE\":\n\t\tisSettingRed = False\n\t\tnextPlayerColor = \"RED\"\n\telse:\n\t\traise ValueError(\"curPlayerColor must be 'RED' or 'BLUE'!\")\n\n # Determine whether the new Node is a MIN or MAX node\n\tif node.MIN_OR_MAX == \"MIN\":\n\t\tnextMIN_OR_MAX = \"MAX\"\n\telif node.MIN_OR_MAX == \"MAX\":\n\t\tnextMIN_OR_MAX = \"MIN\"\n\telse:\n\t\traise ValueError(\"MIN_OR_MAX must be 'MIN' or 'MAX'!\")\n\n # Initialize a variable that identifies whether the value of the parameter Node\n # should be the minimum or maximum value among its children and a variable to\n # store that value\n\tcandidateValue = None\n\tisMax = False\n\tif node.MIN_OR_MAX == \"MIN\":\n\t\tcandidateValue = float(\"inf\")\n\t\tisMax = False\n\telif node.MIN_OR_MAX == \"MAX\":\n\t\tcandidateValue = float(\"-inf\")\n\t\tisMax = True\n\telse:\n\t\traise ValueError(\"MIN_OR_MAX must be 'MIN' or 'MAX'!\")\n\n # Loop over all squares in the current board to consider\n # all the possibilities for the next move\n\tfor x in range(node.board.dim):\n\t\tfor y in range(node.board.dim):\n\t\t\tif node.board.board[x][y].char == '.':\n # Set the stone in this square to set up the board for evaluating\n # the children of the newly created node\n\t\t\t\tnode.board.setPiece(x, y, isSettingRed)\n\n # Add the move to the list of previous moves for storing it in the new Node\n\t\t\t\tnextPrevMove = (x, y)\n\n # Create the child Node, add it to the parameter's list of child Nodes,\n # and recurse on it to build its subtree\n\n #child = Node(node.board, nextPlayerColor, nextDepth, nextMIN_OR_MAX, nextParent, nextPrevMove)\n\t\t\t\tchild = Node(node.board, nextPlayerColor, nextDepth, nextMIN_OR_MAX, nextPrevMove)\n\t\t\t\tnode.children.append(child)\n\t\t\t\tbuildTree(agent, child, alpha, beta)\n\n\t\t\t\t# Remove the stone from this square to reset the board for\n\t\t\t\t# considering other empty squares in which to place a stone\n\t\t\t\tnode.board.unsetPiece()\n\n\t\t\t\t# Update the parameter Node's value and move choice if appropriate\n # (If the parameter Node is a MAX node and the child's value is greater\n # than or equal to the canddidate value or a MIN node and the child's\n # value is less than or equal to the candidate value)\n\t\t\t\tif (isMax and child.value > candidateValue) \\\n\t\t\t\tor ((not isMax) and child.value < candidateValue) \\\n\t\t\t\tor (node.childChoice is None):\n\t\t\t\t\tcandidateValue = child.value\n\t\t\t\t\tnode.value = candidateValue\n\t\t\t\t\tnode.childChoice = child\n\n # If the agent is an alpha-beta agent, prune the\n # parameter Node if appropriate\n\t\t\t\t\tif agent.name == \"ALPHA_BETA\":\n\t\t\t\t\t\tif isMax:\n # If the parameter Node is a MAX node and its new value is\n # greater than or equal to beta, prune it. Otherwise,\n # update alpha if the new value is greater than alpha.\n\t\t\t\t\t\t\tif node.value >= beta:\n\t\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t\telif node.value > alpha:\n\t\t\t\t\t\t\t\talpha = node.value\n\t\t\t\t\t\telse:\n # If the parameter Node is a MIN node and its new value is\n # less than or equal to alpha, prune it. 
Otherwise,\n                            # update beta if the new value is less than beta.\n\t\t\t\t\t\t\tif node.value <= alpha:\n\t\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t\telif node.value < beta:\n\t\t\t\t\t\t\t\tbeta = node.value\n","repo_name":"szymkosz/AILIENS","sub_path":"MP2_Planning_Games/Part2/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":6073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"36909429106","text":"#! /usr/bin/env python3\n# _*_ coding: utf-8 _*_\n\"\"\"\nCode to perform image transformations and dimensionality reduction.\nAuthor: Harsh Bhate\nDate: April 15\n\"\"\"\n\nimport cv2\nimport gym\nimport numpy as np\nfrom sklearn.preprocessing import normalize\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA\n\nclass transformImage(object):\n    \"\"\"Class that performs image transformation.\n    \"\"\"\n    \n    def __init__(self):\n        pass\n    \n    def display_image(self, image):\n        \"\"\"Function to display image\"\"\"\n        cv2.imshow('Image', image)\n        cv2.waitKey(0)\n        cv2.destroyAllWindows()\n    \n    def grayscale(self, image, scale=0.50):\n        \"\"\"Function to resize the image and convert it to grayscale\n        \"\"\"\n        img = cv2.resize(image,\n                         None,\n                         fx=scale,\n                         fy=scale)\n        return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n    def pca_compression(self, image, component=0.75):\n        \"\"\"Function to perform PCA on the image\n        \"\"\"\n        img = normalize(image)\n        pca = PCA(component)\n        return pca.fit_transform(img)\n\n    def threshold(self, image):\n        \"\"\"Uses average thresholding to binarize image\n        \"\"\"\n        if image.ndim > 2:\n            raise ValueError('Please send a singular vector')\n        if image.ndim == 2:\n            _, component = np.shape(image)\n            if component != 1:\n                raise ValueError('Please check Vector Dimension')\n            else:\n                image = image.flatten()\n        if image.ndim == 0:\n            raise ValueError('Please send a valid vector')\n        zero_threshold = 0\n        mean = np.mean(image)\n        # Binarize against the mean intensity: values above the mean map to 1,\n        # everything else to zero_threshold.\n        return np.where(image > mean, 1, zero_threshold)\n","repo_name":"snigdhach/learning_control_policies_atari","sub_path":"q_learning/image_preproces.py","file_name":"image_preproces.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"24838879848","text":"#!/usr/bin/env python\n# coding: utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport argparse\ntry:\n    import configparser\nexcept ImportError:\n    import ConfigParser as configparser\nimport io\nimport operator\nimport os\nimport sys\n\nimport requests\n\nPATH = 'https://cloud.centos.org/centos/7/images/'\nINDEX = PATH + 'image-index'\n\nLATEST = 'latest'\n\n\ndef image_index():\n    index = requests.get(INDEX)\n    filelike = io.StringIO(index.text)\n    cp = configparser.ConfigParser()\n    cp.readfp(filelike)\n    data = {sec: dict(cp.items(sec)) for sec in cp.sections()}\n    for sec in data:\n        data[sec]['url'] = PATH + data[sec]['file']\n    return data\n\n\ndef newest_image():\n    return max(image_index().values(), key=operator.itemgetter('revision'))\n\n\ndef main():\n    parser = argparse.ArgumentParser(description=__doc__)\n\n    parser.add_argument('-r', '--revision', type=str, default=LATEST,\n                        help='Revision to build with, usually of the format YYMM')\n    parser.add_argument('variant', type=str,\n                        help='Image variant to build.')  # extra elements defined in the .sh\n\n    args = parser.parse_args()\n\n    if args.revision == LATEST:\n        image = newest_image()\n    else:\n        try:\n            image = next(i for i in image_index().values() if i['revision'] == args.revision)\n
        except StopIteration:\n            print(\"No image found for revision '{}'\".format(args.revision))\n            return 1\n\n    # os.environ['IMAGE_URL'] = image['url']\n    os.environ['BASE_IMAGE_XZ'] = image['file']\n    os.environ['IMAGE_REVISION'] = image['revision']\n    os.environ['IMAGE_SHA512'] = image['checksum']\n    os.environ['BASE_IMAGE'] = image['file'][:-3]\n\n    os.execl('create-image.sh', 'create-image.sh', args.variant)\n\nif __name__ == '__main__':\n    sys.exit(main())\n","repo_name":"JieyangChen7/CC-CentOS7-BEE","sub_path":"create-image.py","file_name":"create-image.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"26508664996","text":"# 04. The table below shows each vehicle manufacturer's sales during the 2013 to 2018 period, in\n# thousands of units.\n# Write a program that:\n# a) reads the table data from the keyboard;\n# b) reads a year of the period, then determines and displays the manufacturer with the most sales in that year;\n# c) determines and displays the year with the highest overall sales volume.\n# d) determines and displays each manufacturer's average annual sales over the period.\n\n# Declaration of the car tuple\ncars = ['Fiat', 'Ford', 'GM', 'Wolkswagen']\n\nsumsOfYear = []\nbestYearOfSelling = 2013\n\n# Matrix declaration (initially it has only one row, the same as a one-dimensional vector)\nmatrix = []\n\n# a) read the table data from the keyboard;\n# Iterate over each car c\nfor c in range(0, 4):\n    # Row c starts out empty\n    linha = []\n    # Iterate over each quantity sold by c in year y\n    for y in range(2013, 2019):\n        value = int(input().strip())\n        if (c == 0):\n            sumsOfYear.append(value)\n        else:\n            sumsOfYear[y-2013] += value\n        # Append the value to row c\n        linha.append(value)\n\n    # Append row c to the matrix\n    matrix.append(linha)\n\nyear = int(input().strip())\n\n# Initially assume the best-selling quantity is the first one\nb = matrix[0][year-2013]\n\n# Initially assume the best-selling manufacturer is the first one\nmark = cars[0]\n\n# Iterate over each car\nfor c in range(0, 4):\n    # Check whether this car's sales in the chosen year exceed b\n    if (matrix[c][year-2013] > b):\n        # If so, b is the new maximum and mark is cars[c]\n        b = matrix[c][year-2013]\n        mark = cars[c]\n\n# b) read a year of the period, then determine and display the manufacturer with the most sales in that year;\nprint(\n    f'The manufacturer with the most sales in {year} was {mark} with {b} thousand units.')\n\n# Initially assume the best sales year is 2013\nbestSumOfSelling = sumsOfYear[0]\n\n# Iterate over the sales totals of each year, from 2013 to 2018\nfor y in range(2013, 2019):\n    if (sumsOfYear[y-2013] > bestSumOfSelling):\n        bestYearOfSelling = y\n        bestSumOfSelling = sumsOfYear[y-2013]\n\n# c) determine and display the year with the highest overall sales volume.\nprint(\n    f'The year with the highest overall sales volume was {bestYearOfSelling} with {bestSumOfSelling} thousand units.')\n\n# d) determine and display each manufacturer's average annual sales over the period.\n# Start by taking sum = 0\nsum = [0, 0, 0, 0]\nfor i in range(0, 4):\n    for j in range(2013, 2019):\n        sum[i] += matrix[i][j-2013]\n\nprint(\"The average annual sales of each manufacturer between 2013 and 2018 were:\")\nfor i in range(4):\n    print(f'{cars[i]} sold on average {round(sum[i]/6, 2)} units per year.')\n","repo_name":"jonataslaet/ifpi","sub_path":"pec/20220613/run.codes/Sem-15-T1-Q4.py","file_name":"Sem-15-T1-Q4.py","file_ext":"py","file_size_in_byte":2729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"22428658604","text":"from pprint import pprint\n\nfrom django.shortcuts import render\nfrom django.template.response import TemplateResponse\nfrom django.contrib.auth.models import AnonymousUser\n\nfrom datetime import datetime, date, timedelta\n\nfrom ..product.utils import products_visible_to_user\n\n# Create your views here.\ndef calendar(request, date=None):\n    # Default to the current month at request time; a datetime.now() default in\n    # the signature would be evaluated only once, at import time.\n    if date is None:\n        date = datetime.now().strftime(\"%m-%Y\")\n    user = AnonymousUser()\n    release_days = []\n\n    # Give a format to the date\n    # Displays something like: Aug. 27, 2017, 2:57 p.m.\n    date = datetime.strptime(date, \"%m-%Y\")\n    formated_date = date.strftime(\"%B %Y\")\n    next_month = add_one_month(date).strftime(\"%m-%Y\")\n    previous_month = subtract_one_month(date).strftime(\"%m-%Y\")\n\n    products = products_visible_to_user(user).filter(release_date__month=date.month, release_date__year=date.year).order_by('release_date')\n    if len(products):\n        release_days = [{\"products\": [products[0]], \"formated_day\": products[0].release_date.strftime(\"%A %d %B\").lstrip(\"0\").replace(\" 0\", \" \")}]\n        i = 0\n        for index, product in enumerate(products[1:]):\n            if release_days[i][\"products\"][0].release_date != product.release_date:\n                release_days.append({\"products\": [product], \"formated_day\": product.release_date.strftime(\"%A %d %B\").lstrip(\"0\").replace(\" 0\", \" \")})\n                i += 1\n            else:\n                release_days[i][\"products\"].append(product)\n    \n    # for index, release_day in enumerate(release_days):\n    #     release_days[index] = release_day.strftime(\"%A %m %B\").lstrip(\"0\").replace(\" 0\", \" \")\n\n\n\n    return TemplateResponse(request, 'events/calendar.html', {\n        'date': date,\n        'formated_date': formated_date,\n        'release_days': release_days,\n        'previous_month': previous_month,\n        'next_month': next_month\n    })\n\ndef add_one_month(date):\n    date = date.replace(day=1)\n    date = date + timedelta(days=32)\n    date = date.replace(day=1)\n    return date\n\ndef subtract_one_month(date):\n    date = date.replace(day=1)\n    date = date - timedelta(days=1)\n    date = date.replace(day=1)\n    return date","repo_name":"stephecloutier/pfe","sub_path":"autre-monde/saleor/events/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"9995142429","text":"# Lesson 3, Task 3.\ndef greatest_sum (arg_1, arg_2, arg_3):\n    list1 = (arg_1, arg_2, arg_3)\n    sort_list = sorted(list1)\n    if sort_list[1] > sort_list[0]:\n        a = sort_list[1] + sort_list[2]\n        return a\n    else:\n        b = ('Cannot determine the 2 largest numbers')\n        return b\nprint(greatest_sum(arg_1 = int(input('Enter the first number: ')), arg_2 = int(input('Enter the second number: ')), arg_3 = int(input('Enter the third number: '))))","repo_name":"mdelinskaya/PhytonCourse","sub_path":"lesson3_task3.py","file_name":"lesson3_task3.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"28451974801","text":"def checkCounts(sequenceA, sequenceB):\n\n    gaps = 0\n    mismatches = 0\n    matches = 0\n\n    for i in range(0, len(sequenceA)):\n        if(sequenceA[i] == sequenceB[i]):\n            matches += 1\n        else:\n            mismatches += 1\n\n    gaps = sequenceA.count(\"-\") + sequenceB.count(\"-\")\n\n    
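# Report the per-position tallies; this assumes the two sequences are pre-aligned to equal length.\n    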
print(\"Matches : \" + str(matches))\n print(\"Mismatches : \" + str(mismatches))\n print(\"Gaps : \" + str(gaps))\n","repo_name":"gormacc/SequenceComparer","sub_path":"Counter.py","file_name":"Counter.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36309093640","text":"import math\nimport random\nimport copy\n\n#随机\ndef rand(x,y):\n return ((y - x) * random.random() + x)\n#将list构造为指定维度的矩阵([]):config的len为矩阵长度,每个元素值为对应维度的元素个数\ndef list_matrix(config):\n mat = [] #生成的矩阵\n temp = [0.0] #临时列表\n for i in reversed(config):\n mat = []\n mat.append(temp * i)\n temp = mat\n return mat[0]\n#sigmoid函数\ndef sigmoid(x):\n return 1 / (1 + math.exp(-x))\n\n#Relu函数\ndef Relu(x):\n return (x if(x>0) else 0)\n \n#BP神经网络模型类\nclass BPNeuralNetwork:\n def __init__(self):\n self.input_cell = 0 #输入层节点数\n self.input_weight = [] #输入层权重列表 [[]]\n self.hidden_layer = 0 #隐藏层层数(可多层)\n self.hidden_cell = [] #隐藏层每层节点数列表\n self.hidden_weight = [] #隐藏层每层权重列表 [[[]]]\n self.hidden_bias = [] #隐藏层每层偏差列表 [[]]\n self.output_cell = 0 #输出层节点数\n self.output_bias = [] #输出层偏差列表\n #新建模型函数(int,list,int)\n def create_new(self,i_cell,h_cell,o_cell):\n self.input_cell = i_cell\n self.hidden_layer = len(h_cell) #h_cell列表长度即为hidden层数\n self.hidden_cell = h_cell\n self.output_cell = o_cell\n # print('self.input_cell:'+str(self.input_cell)+\"\\n\")\n # print('self.hidden_cell:'+str(self.hidden_cell)+\"\\n\")\n # print('self.hidden_layer:'+str(self.hidden_layer)+\"\\n\")\n # print('self.output_cell:'+str(self.output_cell)+\"\\n\")\n self.random_weight_bias(-2.0,2.0)\n self.print_para()\n #输出当前模型的参数\n def print_para(self):\n print('self.input_cell:'+str(self.input_cell)+\"\\n\")\n print('self.input_weight:'+str(self.input_weight)+\"\\n\")\n print('self.hidden_cell:'+str(self.hidden_cell)+\"\\n\")\n print('self.hidden_layer:'+str(self.hidden_layer)+\"\\n\")\n print('self.hidden_weight:'+str(self.hidden_weight)+\"\\n\")\n print('self.hidden_bias:'+str(self.hidden_bias)+\"\\n\")\n print('self.output_cell:'+str(self.output_cell)+\"\\n\")\n print('self.output_bias:'+str(self.output_bias)+\"\\n\")\n \n #自定义权重和偏差\n def set_weight_bias(self):\n self.input_weight = list_matrix([self.input_cell,self.hidden_cell[0]])\n self.hidden_weight = list_matrix([self.hidden_layer])\n self.hidden_bias = list_matrix([self.hidden_layer])\n self.output_bias = list_matrix([self.output_cell])\n self.hidden_bias[0] = list_matrix([self.hidden_cell[0]])\n \n self.input_weight = [[0.2,-0.3],[0.4,0.1],[-0.5,0.2]]\n self.hidden_bias = [[-0.4,0.2]]\n self.hidden_weight = [[[-0.3],[-0.2]]]\n self.output_bias = [0.1]\n self.print_para()\n #初始化随机权重和偏差\n def random_weight_bias(self,x,y):\n self.input_weight = list_matrix([self.input_cell,self.hidden_cell[0]])\n self.hidden_weight = list_matrix([self.hidden_layer])\n self.hidden_bias = list_matrix([self.hidden_layer])\n self.output_bias = list_matrix([self.output_cell])\n self.hidden_bias[0] = list_matrix([self.hidden_cell[0]])\n for i in range(self.input_cell): #input节点数\n for j in range(self.hidden_cell[0]): #hidden第一层节点数\n self.input_weight[i][j] = rand(x,y) #权重在x,y间取值\n self.hidden_bias[0][j] = rand(x,y)\n \n for i in range(self.hidden_layer): #hidden层数\n if i == (self.hidden_layer-1):\n self.hidden_weight[i] = list_matrix([self.hidden_cell[i],self.output_cell])\n for j in range(self.hidden_cell[i]): #hidden每层节点数\n for k in range(self.output_cell):\n self.hidden_weight[i][j][k] = rand(x,y)\n self.output_bias[k] = 
rand(x,y)\n else:\n self.hidden_weight[i] = list_matrix([self.hidden_cell[i],self.hidden_cell[i+1]])\n self.hidden_bias[i+1] = list_matrix([self.hidden_cell[i+1]])\n for j in range(self.hidden_cell[i]): #hidden每层节点数\n for k in range(self.hidden_cell[i+1]):\n self.hidden_weight[i][j][k] = rand(x,y)\n self.hidden_bias[i+1][k] = rand(x,y)\n \n #学习更新权重和偏差(self,[],[])\n def update_weight_bias(self,input_list,output_list,learn):\n #print('input:'+str(input_list)+' output:'+str(output_list))\n #前向计算每层(不包括input)每个节点的计算值\n out_value_list = copy.deepcopy(self.hidden_bias)\n temp_list = copy.deepcopy(self.output_bias)\n out_value_list.append(temp_list) #hidden层与output层有计算出的out值,结构正好与(hidden_bias+output_bias)对应\n #print('out_value_list:'+str(out_value_list))\n \n #input到第一层hidden计算\n for i in range(self.input_cell): \n for j in range(self.hidden_cell[0]):\n out_value_list[0][j] += (input_list[i] * self.input_weight[i][j]) \n if i == (self.input_cell - 1): #计算完成一个out值都进行非线性化\n out_value_list[0][j] = sigmoid(out_value_list[0][j])\n \n #print('out_value_list:'+str(out_value_list))\n #hidden层到output层计算\n for i in range(self.hidden_layer):\n if i == (self.hidden_layer - 1):\n for j in range(self.hidden_cell[i]):\n for k in range(self.output_cell):\n out_value_list[i+1][k] += (out_value_list[i][k] * self.hidden_weight[i][j][k])\n if j == (self.hidden_cell[i] - 1):\n out_value_list[i+1][k] = sigmoid(out_value_list[i+1][k])\n else:\n for j in range(self.hidden_cell[i]):\n for k in range(self.hidden_cell[i+1]):\n out_value_list[i+1][k] += (out_value_list[i][j] * self.hidden_weight[i][j][k])\n if j == (self.hidden_cell[i] - 1):\n out_value_list[i+1][k] = sigmoid(out_value_list[i+1][k])\n #print('out_value_list:'+str(out_value_list[-1]))\n \n err_value_list = copy.deepcopy(out_value_list) #错误list结构正好与out_value_list一样\n #print('err_value:'+str(err_value_list[-1]))\n #反向传递计算\n #output到hidden第一次反向传递\n for i in range(self.output_cell):\n err_value_list[-1][i] = 0.0 #\n err_value_list[-1][i] = out_value_list[-1][i]*(1 - out_value_list[-1][i])*(output_list[i] - out_value_list[-1][i])\n # print('error:'+str(output_list[i] - out_value_list[-1][i]))\n # print('self.hidden_bias:'+str(self.hidden_bias))\n # print('self.output_bias:'+str(self.output_bias))\n self.output_bias[i] += (learn * err_value_list[-1][i])\n # print('self.output_bias[i]:'+str(self.output_bias[i]))\n #hidden到input层的传递\n for ii in range(self.hidden_layer):\n i = self.hidden_layer - ii - 1 #反向处理\n #print('i:'+str(i))\n if i == (self.hidden_layer - 1):\n for j in range(self.hidden_cell[i]):\n err_value_list[i][j] = 0 #\n for k in range(self.output_cell):\n err_value_list[i][j] += (err_value_list[i+1][k] * self.hidden_weight[i][j][k] )\n self.hidden_weight[i][j][k] += (learn * err_value_list[i+1][k] * out_value_list[i][j])\n err_value_list[i][j] *= ((1 - out_value_list[i][j])*out_value_list[i][j])\n self.hidden_bias[i][j] += (learn * err_value_list[i][j])\n else:\n for j in range(self.hidden_cell[i]):\n err_value_list[i][j] = 0 #\n for k in range(self.hidden_cell[i+1]):\n err_value_list[i][j] += (err_value_list[i+1][k] * self.hidden_weight[i][j][k] )\n self.hidden_weight[i][j][k] += (learn * err_value_list[i+1][k] * out_value_list[i][j])\n err_value_list[i][j] *= ((1 - out_value_list[i][j])*out_value_list[i][j])\n self.hidden_bias[i][j] += (learn * err_value_list[i][j])\n #print('err_value_list:'+str(err_value_list))\n #self.print_para()\n #计算input层到hidden的权重更新\n for i in range(self.input_cell):\n for j in range(self.hidden_cell[0]):\n 
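# Delta-rule update: weight += learning rate * back-propagated error * input activation.\n                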
self.input_weight[i][j] += (learn * err_value_list[0][j] * input_list[i])\n \n #通过输入得到输出\n def get_result(self,input_dataset,output_dataset):\n index = 0\n for input_list in input_dataset:\n #前向计算每层(不包括input)每个节点的计算值\n out_value_list = copy.deepcopy(self.hidden_bias)\n temp_list = copy.deepcopy(self.output_bias)\n out_value_list.append(temp_list) #hidden层与output层有计算出的out值,结构正好与(hidden_bias+output_bias)对应\n #print('out_value_list:'+str(out_value_list))\n \n #input到第一层hidden计算\n for i in range(self.input_cell): \n for j in range(self.hidden_cell[0]):\n out_value_list[0][j] += (input_list[i] * self.input_weight[i][j]) \n if i == (self.input_cell - 1): #计算完成一个out值都进行非线性化\n out_value_list[0][j] = sigmoid(out_value_list[0][j])\n \n #print('out_value_list:'+str(out_value_list))\n #hidden层到output层计算\n for i in range(self.hidden_layer):\n if i == (self.hidden_layer - 1):\n for j in range(self.hidden_cell[i]):\n for k in range(self.output_cell):\n out_value_list[i+1][k] += (out_value_list[i][k] * self.hidden_weight[i][j][k])\n if j == (self.hidden_cell[i] - 1):\n out_value_list[i+1][k] = sigmoid(out_value_list[i+1][k])\n else:\n for j in range(self.hidden_cell[i]):\n for k in range(self.hidden_cell[i+1]):\n out_value_list[i+1][k] += (out_value_list[i][j] * self.hidden_weight[i][j][k])\n if j == (self.hidden_cell[i] - 1):\n out_value_list[i+1][k] = sigmoid(out_value_list[i+1][k])\n print(str(input_list)+' actual:'+str(output_dataset[index])+' forecast:'+str(out_value_list[-1])+\"\\n\")\n print()\n index += 1\n \n #训练函数\n def train(self,input_dataset,output_dataset,learn,limit):\n count = 0\n i = 0\n while 1:\n self.update_weight_bias(input_dataset[i],output_dataset[i],learn)\n #print('train_input:'+str(input_dataset[i]))\n #print('train_output:'+str(output_dataset[i]))\n count += 1\n i += 1\n if count < limit and i == len(input_dataset):\n i = 0\n elif count >= limit:\n break;\n#数据预处理(数据集,离散数据,特征列表)\ndef dataSetPreprocess(dataSet,discLabels,labelsList):\n labelsList_new = []\n for i in range(len(dataSet[0])):\n if i in discLabels: #完全离散数据\n rowNum = i\n valList = [] #存放离散数据\n for each in dataSet:\n if each[rowNum] not in valList:\n valList.append(each[rowNum])\n #print('valList:'+str(valList))\n for val in valList:\n labelsList_new.append(labelsList[rowNum]+'::'+val)\n for each in dataSet:\n replace = [] #用来替换离散变量的多个变量list\n for num in range(len(valList)): #添加与value种类数相同的元素\n replace.append(0)\n #print('len(valList)'+str(len(valList)))\n #print('replace:'+str(replace))\n for j in range(len(valList)):\n if valList[j] == each[rowNum]: #匹配到离散变量的值\n replace[j] = 1 #替换list对应位置置为1\n each[rowNum] = replace\n else:\n labelsList_new.append(labelsList[i])\n labelsList.clear()\n for each in labelsList_new:\n labelsList.append(each)\n #此处不可使用labelsList = labelsList_new,labelsList会被系统看作临时变量从而无法改变真正的labelsList值\n #将离散化数据处理产生的列表变成多个元素([[1,2],3,[4,5,6]] --> [1,2,3,4,5,6])\n dataSet_new = []\n for each in dataSet:\n tmp_list = []\n for i in each:\n if type(i).__name__ != 'list':\n tmp_list.append(float(i))\n else:\n for j in i:\n tmp_list.append(float(j))\n dataSet_new.append(tmp_list)\n dataSet.clear()\n for each in dataSet_new:\n dataSet.append(each)\n #不能用dataSet = dataSet_new\n #print('labelsList_new:'+str(labelsList_new))\n #print('dataSet:'+str(dataSet))\n\n#需要人工预处理的可量化数据如等级,程度描述,真假等\n#(数据集,要量化的数据的列号,量化参考字典)\ndef dataSetQuantize(dataSet,row,quantizeDict):\n for each in dataSet:\n each[row] = quantizeDict[each[row]] #取对应的量化结果\n\n#超过[0,1]范围的连续数据等比缩放限制到[0,1]内\ndef dataSetLimit(dataSet,row,area):\n min = area[0]\n 
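# 'area' supplies the [lower, upper] endpoints used to min-max scale values into [0, 1]\n    # (note: these locals shadow the built-in min/max within this function).\n    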
max = area[-1]\n for each in dataSet:\n each[row] = (float(each[row]) - min)/(max - min)\n\nif __name__ == '__main__':\n BP1 = BPNeuralNetwork()\n BP1.create_new(2,[5,5],1)\n BP1.train([[1,1],[0,1],[1,0],[0,0]],[[0],[1],[1],[0]],0.05,1000000)\n BP1.get_result([[1,1],[1,0],[0,1],[0,0]],[0,1,1,0])\n # BP1.set_weight_bias()\n # BP1.update_weight_bias([1,0,1],[1],0.9)\n","repo_name":"SeptinaryAI/graduation_project_by_python","sub_path":"neural.py","file_name":"neural.py","file_ext":"py","file_size_in_byte":14459,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23622300537","text":"import requests\nimport wget\nimport tarfile\nimport polib\nimport os\nimport json\nimport shutil\nfrom multiprocessing import Pool, Manager\nimport zipfile\nimport polib\nfrom datetime import datetime\nimport argparse\nimport time\n\npackages = [\n {'url': 'https://pypi.infra.wish.com/api/package/wishstrings/',\n 'folder_name': 'wishstrings', 'is_python': True},\n {'url': 'https://pypi.infra.wish.com/api/package/merchantstrings/',\n 'folder_name': 'merchantstrings', 'is_python': True},\n {'url': 'https://npm.infra.wish.com/-/verdaccio/sidebar/@ContextLogic/mmstrings',\n 'folder_name': 'mmstrings', 'is_javascript': True},\n {'url': 'https://npm.infra.wish.com/-/verdaccio/sidebar/@ContextLogic/bluestrings',\n 'folder_name': 'bluestrings', 'is_javascript': True},\n {'url': 'https://npm.infra.wish.com/-/verdaccio/sidebar/@ContextLogic/merchantstrings',\n 'folder_name': 'merchantstrings', 'is_javascript': True},\n {'url': 'https://npm.infra.wish.com/-/verdaccio/sidebar/@ContextLogic/wishlocalwebstrings',\n 'folder_name': 'wishlocalwebstrings', 'is_javascript': True},\n {'url': 'https://npm.infra.wish.com/-/verdaccio/sidebar/@ContextLogic/wishwebcozystrings',\n 'folder_name': 'wishwebcozystrings', 'is_javascript': True},\n {'url': 'https://npm.infra.wish.com/-/verdaccio/sidebar/@ContextLogic/wpsuistrings',\n 'folder_name': 'wpsuistrings', 'is_javascript': True},\n {'url': 'https://npm.infra.wish.com/-/verdaccio/sidebar/@ContextLogic/legostrings',\n 'folder_name': 'legostrings', 'is_javascript': True}]\n\nwork_dir = '/Users/renchen/Work/playground/allstrings'\nxtm_token = os.environ.get('XTM_TOKEN')\n\nheaders = {\n 'Authorization': xtm_token\n}\n\nxtm_uri = \"https://wish.xtm-intl.com/project-manager-api-rest\"\nprojects_uri = '/projects'\nproject_uri = '/projects/{0}'\ndownload_source_uri = '/projects/{0}/files/sources/download'\n\nartifacts_dir = os.path.join(work_dir, 'artifacts')\noutput_dir = os.path.join(work_dir, 'output')\nlocales_json_dir = os.path.join(output_dir, 'locales.json')\nprojects_json_dir = os.path.join(output_dir, 'projects.json')\nall_projects_sources_dir = os.path.join(work_dir, 'all_projects')\nsources_json_dir = os.path.join(output_dir, 'sources.json')\ntranslations_output_dir = os.path.join(output_dir, 'translations')\nbuild_json_dir = os.path.join(output_dir, 'build.json')\nrepo_dir = None\n\nversions = {}\n\ndef setup_dir(dir):\n if not os.path.exists(dir):\n os.makedirs(dir)\n else:\n shutil.rmtree(dir)\n\ndef init():\n global artifacts_dir, output_dir, locales_json_dir, projects_json_dir, all_projects_sources_dir, sources_json_dir,\\\n build_json_dir, translations_output_dir, headers\n artifacts_dir = os.path.join(work_dir, 'artifacts')\n output_dir = os.path.join(work_dir, 'output')\n locales_json_dir = os.path.join(output_dir, 'locales.json')\n projects_json_dir = os.path.join(output_dir, 'projects.json')\n 
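# Re-derive the remaining output paths so a work_dir overridden in main() takes effect.\n    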
all_projects_sources_dir = os.path.join(work_dir, 'all_projects')\n sources_json_dir = os.path.join(output_dir, 'sources.json')\n translations_output_dir = os.path.join(output_dir, 'translations')\n build_json_dir = os.path.join(output_dir, 'build.json')\n dirs = [artifacts_dir, output_dir, all_projects_sources_dir, translations_output_dir]\n for dir in dirs:\n if dir:\n setup_dir(dir)\n headers = {\n 'Authorization': xtm_token\n }\n\n\ndef download(url):\n local_filename = url.split('/')[-1]\n local_file_dir = os.path.join(artifacts_dir, local_filename)\n print(f'Downloading {url} to {local_file_dir}')\n with requests.get(url, stream=True) as r:\n r.raise_for_status()\n with open(local_file_dir, 'wb') as f:\n for chunk in r.iter_content(chunk_size=8192):\n f.write(chunk)\n return local_file_dir\n\n\ndef artifacts_helper(pkg_url_obj):\n resp = requests.get(pkg_url_obj['url'])\n resp_json = json.loads(resp.content.decode())\n if 'is_python' in pkg_url_obj:\n url = resp_json['packages'][0]['url']\n dest_dir = os.path.join(artifacts_dir, pkg_url_obj['folder_name'])\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n filepath = download(url)\n print('Download successful')\n tar = tarfile.open(filepath)\n untar_dir = os.path.join(dest_dir, 'untar')\n if not os.path.exists(untar_dir):\n os.makedirs(untar_dir)\n tar.extractall(untar_dir)\n parse_python_artifact_content(os.path.basename(filepath).split('.tar.gz')\n [0], pkg_url_obj['folder_name'], untar_dir)\n if 'is_javascript' in pkg_url_obj:\n dest_dir = os.path.join(\n artifacts_dir, pkg_url_obj['folder_name'] + '/')\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n url = resp_json['latest']['dist']['tarball']\n filepath = download(url)\n print('Download successful')\n tar = tarfile.open(filepath)\n untar_dir = os.path.join(dest_dir, 'untar')\n if not os.path.exists(untar_dir):\n os.makedirs(untar_dir)\n tar.extractall(untar_dir)\n parse_javacript_artifact_content(untar_dir, pkg_url_obj['folder_name'])\n\n\ndef merge(strings, content):\n for key, value in content.items():\n strings[key] = value\n\ndef merge_with_append(strings, content):\n for key, value in content.items():\n if key == 'Payment':\n print(value)\n if key in strings:\n strings[key] = strings[key] + value\n else:\n strings[key] = value\n\n\ndef artifacts():\n print('Start building strings.json for artifacts')\n p = Pool(8)\n p.map(artifacts_helper, packages)\n strings = {}\n for pkg in packages:\n translations_dir = os.path.join(\n artifacts_dir, pkg['folder_name'], 'translations')\n locales = os.listdir(translations_dir)\n for locale in locales:\n strings_dir = os.path.join(\n translations_dir, locale, 'strings.json')\n with open(strings_dir, 'r') as f:\n data = json.load(f)\n if locale in strings:\n merge(strings[locale], data)\n else:\n strings[locale] = data\n\n for locale, value in strings.items():\n strings_dir = os.path.join(translations_output_dir, locale)\n if not os.path.exists(strings_dir):\n os.makedirs(strings_dir)\n with open(os.path.join(strings_dir, 'strings.json'), 'w', encoding='utf8') as f:\n json.dump(value, f, ensure_ascii=False, sort_keys=True)\n\n with open(locales_json_dir, 'w') as f:\n l = ['en-US'] + list(strings.keys())\n l.sort()\n json.dump(l, f, sort_keys=True)\n print('Successfully built all translations strings.json')\n\n\ndef parse_javacript_artifact_content(untar_dir, pkg_folder_name):\n print('Parsing contents at ' + untar_dir)\n locales_folder = os.path.join(\n untar_dir, 'package')\n files = os.listdir(locales_folder)\n for 
file in files:\n if file.startswith('.') or file == 'package.json':\n continue\n strings = {}\n locale = file.split('.')[0]\n raw_file_path = os.path.join(locales_folder, locale + '.raw.json')\n with open(raw_file_path, 'r') as f:\n raw_file_data = json.load(f)\n for source_string, value in raw_file_data.items():\n if not source_string:\n continue\n\n context = None\n if '\\u0004' in source_string:\n splitted = source_string.split('\\u0004')\n context = splitted[0]\n source_string = splitted[1]\n normalized_locale = locale.replace('_', '-')\n if value[0]:\n try:\n if type(value[1]) is list:\n for v in value[1]:\n strings[v] = {\n 'is_translated': True, 'source_string': source_string, 'locale': normalized_locale,\n 'context': context, 'package': pkg_folder_name\n }\n else:\n for v in value[1:]:\n strings[v] = {\n 'is_translated': True, 'source_string': source_string, 'locale': normalized_locale,\n 'context': context, 'package': pkg_folder_name\n }\n except Exception as e:\n print(raw_file_path)\n print(value)\n raise e\n else:\n strings[value[1]] = {\n 'is_translated': True, 'source_string': source_string, 'locale': normalized_locale,\n 'context': context, 'package': pkg_folder_name\n }\n strings_dir = os.path.join(\n artifacts_dir, pkg_folder_name, 'translations', normalized_locale)\n if not os.path.exists(strings_dir):\n os.makedirs(strings_dir)\n with open(os.path.join(strings_dir, 'strings.json'), 'w', encoding='utf8') as f:\n json.dump(strings, f, ensure_ascii=False, sort_keys=True)\n\n\ndef parse_python_artifact_content(foldername, pkg_folder_name, untar_dir):\n print('Parsing contents at ' + untar_dir)\n locales_folder = os.path.join(\n untar_dir, foldername, pkg_folder_name, 'locale')\n locales = os.listdir(locales_folder)\n for locale in locales:\n strings = {}\n if locale.startswith('.'):\n continue\n wish_mo_file_path = os.path.join(\n locales_folder, locale, 'LC_MESSAGES', 'wish.mo')\n mo = polib.mofile(wish_mo_file_path)\n for entry in mo:\n if entry.msgstr:\n strings[entry.msgstr] = {\n 'is_translated': True, 'source_string': entry.msgid, 'locale': locale, 'context': entry.msgctxt, 'package': pkg_folder_name}\n elif entry.msgstr_plural:\n for index in entry.msgstr_plural:\n strings[entry.msgstr_plural[index]] = {\n 'is_translated': True, 'source_string': entry.msgid, 'locale': locale, 'context': entry.msgctxt, 'package': pkg_folder_name}\n else:\n print('ERROR!!')\n print(entry)\n return\n strings_dir = os.path.join(\n artifacts_dir, pkg_folder_name, 'translations', locale)\n if not os.path.exists(strings_dir):\n os.makedirs(strings_dir)\n with open(os.path.join(strings_dir, 'strings.json'), 'w', encoding='utf8') as f:\n json.dump(strings, f, ensure_ascii=False, sort_keys=True)\n\n\ndef get_projects():\n print('Getting all projects from XTM')\n resp = requests.get(xtm_uri + projects_uri, headers=headers)\n total_count = int(resp.headers['xtm-total-items-count'])\n projects = json.loads(resp.content.decode())\n page = 2\n while len(projects) < total_count:\n resp = requests.get(xtm_uri + projects_uri,\n headers=headers, params={'page': page})\n projects = projects + json.loads(resp.content.decode())\n page = page + 1\n print('Successfully fetched {0} projects'.format(len(projects)))\n return projects\n\n\ndef description_json(description):\n try:\n return json.loads(description)\n except:\n return {}\n\n\ndef projects_json_job(project):\n projects_json = {}\n resp = requests.get(\n xtm_uri + project_uri.format(project['id']), headers=headers)\n project = 
json.loads(resp.content.decode())\n projects_json[project['name']] = {\n 'name': project['name'],\n 'id': project['id'],\n 'source_locale': project['sourceLanguage'],\n 'target_locales': project['targetLanguages']\n }\n if 'description' in project:\n projects_json[project['name']] = {\n **projects_json[project['name']], **description_json(project['description'])}\n return projects_json\n\n\ndef projects_json(projects):\n print('Start building projects.json')\n projects_json = {}\n p = Pool(8)\n results = p.map(projects_json_job, projects)\n p.close()\n p.join()\n for ret in results:\n merge(projects_json, ret)\n with open(projects_json_dir, 'w', encoding='utf8') as f:\n json.dump(projects_json.copy(), f, ensure_ascii=False, sort_keys=True)\n print('Successfully saved projects.json to ' + projects_json_dir)\n return projects_json\n\n\ndef source_files_job(project):\n data = requests.get(\n xtm_uri + download_source_uri.format(project['id']), headers=headers).content\n download_dest_path = os.path.join(\n all_projects_sources_dir, project['name'])\n print('Saving ' + project['name'] + ' to dir: ' + download_dest_path)\n if not os.path.exists(download_dest_path):\n os.makedirs(download_dest_path)\n with open(os.path.join(download_dest_path, 'source.zip'), 'wb') as f:\n f.write(data)\n\n\ndef source_files(projects):\n p = Pool(8)\n p.map(source_files_job, projects)\n\n\ndef sources_json_job(source_path, dir):\n try:\n strings = {}\n if not zipfile.is_zipfile(source_path):\n print(source_path + ' is not a valid zip file. Skipping...')\n return strings\n zf = zipfile.ZipFile(source_path)\n files = zf.namelist()\n for file in files:\n if file.endswith('.po'):\n data = zf.read(file)\n po = polib.pofile(data.decode())\n for entry in po:\n if entry.msgid == 'Payment':\n print(dir)\n if entry.msgid not in strings:\n strings[entry.msgid] = [{\n 'project': dir,\n 'context': entry.msgctxt,\n 'plurals': entry.msgid_plural\n }]\n else:\n strings[entry.msgid].append({\n 'project': dir,\n 'context': entry.msgctxt,\n 'plurals': entry.msgid_plural\n })\n if entry.msgid_plural:\n if entry.msgid_plural not in strings:\n strings[entry.msgid_plural] = [{\n 'project': dir,\n 'context': entry.msgctxt\n }]\n else:\n strings[entry.msgid_plural].append({\n 'project': dir,\n 'context': entry.msgctxt\n })\n return strings\n except Exception as e:\n print(e)\n raise e\n\n\ndef sources_json(projects):\n print('Start building sources.json')\n source_files(projects)\n dirs = os.listdir(all_projects_sources_dir)\n p = Pool(8)\n\n args = [(os.path.join(all_projects_sources_dir, dir, 'source.zip'), dir)\n for dir in dirs]\n results = p.starmap(sources_json_job, args)\n p.close()\n p.join()\n strings = {}\n for ret in results:\n merge_with_append(strings, ret)\n with open(sources_json_dir, 'w', encoding='utf8') as f:\n json.dump(strings, f, ensure_ascii=False, sort_keys=True)\n print('Successfully saved sources.json to ' + sources_json_dir)\n\n\ndef copy():\n data_dir = os.path.join(repo_dir, 'src/data')\n translations_dir = os.path.join(repo_dir, 'public/translations')\n shutil.copy(projects_json_dir, data_dir)\n print('Copied artifact: ' + projects_json_dir, data_dir)\n shutil.copy(sources_json_dir, data_dir)\n print('Copied artifact: ' + sources_json_dir, data_dir)\n shutil.copy(locales_json_dir, data_dir)\n print('Copied artifact: ' + locales_json_dir, data_dir)\n shutil.copy(build_json_dir, data_dir)\n print('Copied artifact: ' + build_json_dir, data_dir)\n shutil.rmtree(translations_dir)\n 
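# shutil.copytree requires the destination directory not to exist, hence the rmtree above.\n    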
shutil.copytree(translations_output_dir, translations_dir)\n print('Copied artifact: ' + translations_output_dir, translations_dir)\n\n\ndef build_stats():\n print('Start building build.json')\n obj = {\n 'last_build_time': datetime.now().isoformat(),\n 'versions': {}\n }\n for pkg in packages:\n resp = requests.get(pkg['url'])\n resp_json = json.loads(resp.content.decode())\n if 'is_python' in pkg:\n obj['versions'][pkg['folder_name']\n ] = resp_json['packages'][0]['version']\n if 'is_javascript' in pkg:\n obj['versions'][pkg['folder_name']] = resp_json['latest']['version']\n with open(build_json_dir, 'w') as f:\n json.dump(obj, f, sort_keys=True)\n print('Successfuly saved build.json' + build_json_dir)\n\n\ndef main():\n start = time.time()\n parser = argparse.ArgumentParser(\n description='Sync sources and translations')\n parser.add_argument('-w', '--work_dir',\n help='Working directory', required=True)\n parser.add_argument('-r', '--repo_dir',\n help='Repository directory', required=True)\n parser.add_argument('-t', '--token', help='XTM token', required=True)\n args = vars(parser.parse_args())\n global work_dir, xtm_token, repo_dir\n work_dir = args['work_dir']\n xtm_token = args['token']\n repo_dir = args['repo_dir']\n init()\n\n projects = get_projects()\n projects_json(projects)\n sources_json(projects)\n build_stats()\n artifacts()\n copy()\n print('Done')\n print('Took: ' + str(time.time() - start) + ' seconds')\n\nmain()\n","repo_name":"rsun-wish/strings-search","sub_path":"scripts/sync.py","file_name":"sync.py","file_ext":"py","file_size_in_byte":17793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32451378094","text":"import discord\nimport datetime\nfrom discord import Embed\nfrom discord.ext import commands\n\nclient = commands.Bot(command_prefix='b:')\nclient.remove_command('help')\n\n@client.event\nasync def on_ready():\n print(\"bot is ready\")\n\n\n@client.command()\nasync def ping(ctx):\n await ctx.send(\"pong\")\n print(ctx.author)\n\n\n@client.event\nasync def on_command_error(ctx, error):\n if isinstance(error, commands.errors.CheckFailure):\n await ctx.send('You do not have the correct role for this command.')\n\n\n#voting-----------------------------------\n# vote_start = True\n# separated_vote = True\n# fase_2 = True\n\nvote_start = False\nseparated_vote = False\nfase_2 = False\ncontinue_vote = False\n\n# dict_for_voting = {274590558528471053: ['goh', 'ruflees', 1, '274590558528471053'], 745765600936067213: ['hxh', 'ruflees test', 2, '745765600936067213'], 108538100686274560: ['test', 'Brunao', 3, '108538100686274560']}\n# voters = {745765600936067213: ['hxh', 'ruflees test', '745765600936067213'], 108538100686274560: ['test', 'Brunao', '108538100686274560']}\ndict_for_voting = {}\nvoters = {}\ntie_contenders = {}\ntie_votes = {}\ncontinue_voters = {}\nwinner = {}\n\nvote_theme = []\n\nyes_votes = 2\nno_votes = 3\n\ndef update_winner():\n if yes_votes >= no_votes:\n winner.update({\"yes\": [yes_votes]})\n winner.update({\"no\": [no_votes]})\n \n else:\n winner.update({\"no\": [no_votes]})\n winner.update({\"yes\": [yes_votes]})\n\n#starts a voting session-----------------------------------------------\n@client.command()\n@commands.has_role('Bartender')\nasync def start(ctx, arg=\"off\", *, theme):\n global vote_start, separated_vote, vote_theme\n if vote_start == False:\n if arg.lower() == \"on\":\n separated_vote = True\n vote_start = True\n await ctx.send('A separated voting session has been 
started, please add your contenders.')\n \n embed = discord.Embed(\n colour = discord.Colour.blue()\n )\n\n embed.set_author(name=\"The theme is: %s\"%(theme))\n\n await ctx.send(embed=embed)\n print(\"A voting session has been started\")\n\n x = datetime.datetime.now()\n\n themes_file = open(\"data/themes/themes.txt\", \"a\")\n themes_file.write(\"# %s/%s/%s: %s \"%(x.strftime(\"%d\"), x.strftime(\"%m\"), x.strftime(\"%Y\"), theme))\n themes_file.close()\n\n contenders_file = open(\"data/contenders/%s\"%(theme), \"a\")\n voters_file = open(\"data/voters/%s\"%(theme), \"a\")\n contenders_file.write(\" # %s/%s/%s %s &: \"%(x.strftime(\"%d\"), x.strftime(\"%m\"), x.strftime(\"%Y\"), theme))\n voters_file.write(\" # %s/%s/%s %s &: \"%(x.strftime(\"%d\"), x.strftime(\"%m\"), x.strftime(\"%Y\"), theme))\n contenders_file.close()\n voters_file.close()\n\n vote_theme.append(theme)\n print(vote_theme)\n\n\n elif vote_start == True:\n if separated_vote == True:\n await ctx.send('The separated voting session has already been started.')\n elif separated_vote == False:\n await ctx.send('The voting session has already been started.')\n\n\n#stops the voting session------------------------------------------------------\n@client.command()\n@commands.has_role('Bartender')\nasync def stop(ctx):\n global vote_start,separated_vote, fase_2\n if vote_start == True:\n await ctx.send(\"The voting session has been stopped.\")\n vote_start = False\n fase_2 = False\n separated_vote = False\n print(\"A voting session has been ended.\")\n else:\n await ctx.send('There is no voting session in progress.')\n\n\n#creates a contender called in by the author------------------------------------------\n@client.command()\nasync def add(ctx, *, name):\n global dict_for_voting\n name_checker = [dict_for_voting[x][0].lower() for x in dict_for_voting]\n if vote_start == False:\n await ctx.send(\"There is no voting session in progress.\")\n\n elif str(name.lower()) in name_checker:\n await ctx.send(\"A contender with the same name has already been added.\")\n\n elif vote_start == True:\n if separated_vote == True and fase_2 == True:\n await ctx.send(\"%s the contender adding fase is already done.\"%ctx.author.mention)\n\n else:\n await ctx.send(\"%s added %s\" %(ctx.author.name, name))\n if ctx.author.nick == None:\n dict_for_voting.update({ctx.author.id : [str(name), str(ctx.author.name), 0, str(ctx.author.id)]})\n else:\n dict_for_voting.update({ctx.author.id : [str(name), str(ctx.author.nick), 0, str(ctx.author.id)]})\n\n contenders_file = open(\"data/contenders/%s\"%(vote_theme[0]), \"a\")\n contenders_file.write(\"%s, %s, %s, %s; \" %(str(name), str(ctx.author.name), 0, str(ctx.author.id)))\n contenders_file.close()\n\n print(dict_for_voting)\n\n\n#changes to the voting phase--------------------------------------------------\n@client.command(aliases=['change'])\n@commands.has_role('Bartender')\nasync def separate(ctx):\n global fase_2\n if separated_vote == False and vote_start == True:\n await ctx.send(\"The voting session isn't separate.\")\n\n elif separated_vote == True and fase_2 == False:\n await ctx.send(\"@everyone We are now changing to the voting phase.\")\n fase_2 = True\n \n else:\n if vote_start == False:\n await ctx.send('There is no voting session in progress.')\n else:\n await ctx.send('We are already in the voting phase')\n\n\n#returns the vote list---------------------------------------------------\n@client.command()\nasync def votelist(ctx):\n embed = discord.Embed(title=\"The Votelist is:\")\n for values in 
dict_for_voting:\n msg_to_add = \"By \" + str(dict_for_voting[values][1]) + \" and has: \" + str(dict_for_voting[values][2]) + \" vote(s)\"\n embed.add_field(name=str(dict_for_voting[values][0]), value=str(msg_to_add), inline=False)\n \n print(dict_for_voting)\n print(voters)\n \n await ctx.send(embed=embed)\n\n\n#casts a vote to the title called in by the user---------------------------------\n@client.command()\nasync def vote(ctx, *, name):\n global dict_for_voting, voters\n if vote_start == True:\n if ctx.author.id not in voters:\n if separated_vote == True and fase_2 == False:\n await ctx.send(\"We aren't in the voting phase yet.\")\n\n elif ctx.author.id in dict_for_voting:\n if str(name).lower() == dict_for_voting[ctx.author.id][0].lower():\n await ctx.send(\"You cannot vote for yourself.\")\n \n else:\n for value in dict_for_voting:\n if str(name).lower() == dict_for_voting[value][0].lower():\n print(\"test\")\n await ctx.send(\"%s voted for: %s\"%(ctx.author.name, dict_for_voting[value][0]))\n if ctx.author.nick == None:\n voters.update({ctx.author.id : [dict_for_voting[value][0], str(ctx.author.name), str(dict_for_voting[value][3]), str(ctx.author.id)]})\n dict_for_voting[value][2] += 1\n else:\n voters.update({ctx.author.id : [dict_for_voting[value][0], str(ctx.author.nick), str(dict_for_voting[value][3]), str(ctx.author.id)]})\n dict_for_voting[value][2] += 1\n\n voters_file = open(\"data/voters/%s\"%(vote_theme[0]), \"a\")\n voters_file.write(\"%s, %s, %s, %s, %s, 'ok'; \" %(dict_for_voting[value][0], ctx.author.name, dict_for_voting[value][2], dict_for_voting[value][3], str(ctx.author.id)))\n voters_file.close()\n\n print('%s voted for %s'%(ctx.author.id, name))\n print(dict_for_voting)\n print(voters)\n\n else:\n await ctx.send(\"you have already voted\")\n\n else:\n await ctx.send(\"There is no voting session in progress.\")\n\n\n@client.command(aliases=['votename'])\nasync def vote_name(ctx, member:discord.Member):\n global dict_for_voting, voters\n if vote_start == True:\n if ctx.author.id not in voters:\n if separated_vote == True and fase_2 == False:\n await ctx.send(\"We aren't in the voting phase yet.\")\n \n elif int(member.id) in dict_for_voting:\n if str(ctx.author.id) == dict_for_voting[member.id][3]:\n await ctx.send(\"You cannot vote for yourself.\")\n\n else:\n await ctx.send(\"%s voted for: %s\"%(ctx.author.name, dict_for_voting[int(member.id)][0]))\n \n if ctx.author.nick == None:\n voters.update({ctx.author.id : [dict_for_voting[int(member.id)][0], str(ctx.author.name), str(dict_for_voting[int(member.id)][3]), str(ctx.author.id)]})\n dict_for_voting[int(member.id)][2] += 1\n else:\n voters.update({ctx.author.id : [dict_for_voting[int(member.id)][0], str(ctx.author.nick), str(dict_for_voting[int(member.id)][3]), str(ctx.author.id)]})\n dict_for_voting[int(member.id)][2] += 1\n\n voters_file = open(\"data/voters/%s\"%(vote_theme[0]), \"a\")\n voters_file.write(\"%s, %s, %s, %s, %s, 'ok'; \" %(dict_for_voting[int(member.id)][0], ctx.author.name, dict_for_voting[int(member.id)][2], dict_for_voting[int(member.id)][3], str(ctx.author.id)))\n voters_file.close()\n\n print('%s voted for %s'%(ctx.author.id, member.id))\n print(dict_for_voting)\n print(voters)\n\n else:\n await ctx.send(\"you have already voted\")\n\n else:\n await ctx.send(\"There is no voting session in progress.\")\n\n\n#removes the vote casted by the user who called it---------------------------------------\n@client.command(aliases=['remove'])\nasync def remove_vote(ctx):\n global voters, 
dict_for_voting\n    if ctx.author.id in voters:\n        for value in dict_for_voting:\n            if voters[ctx.author.id][0] == dict_for_voting[value][0]:\n                await ctx.send(\"%s your vote has been removed\"%(ctx.author.name))\n                dict_for_voting[value][2] -= 1\n                del voters[ctx.author.id]\n\n                voters_file = open(\"data/voters/%s\"%(vote_theme[0]), \"a\")\n                voters_file.write(\"%s, %s, %s, %s, %s, 'removed'; \" %(dict_for_voting[value][0], dict_for_voting[value][1], dict_for_voting[value][2], dict_for_voting[value][3], ctx.author.id))\n                voters_file.close()\n                # The voter's entry is gone; stop before the next iteration looks it up again.\n                break\n    \n    else:\n        await ctx.send(\"%s you haven't voted yet or you have already removed your vote\"%(ctx.author.name))\n\n\n#removes the contender called in by the author--------------------------------------------\n@client.command(aliases=['removecontender', 'rc'])\n@commands.has_role('Bartender')\nasync def remove_contender(ctx, member:discord.Member):\n    global voters\n    print(member.id)\n    delete_id = []\n    for value in voters:\n        if voters[value][2] == dict_for_voting[member.id][3]:\n            delete_id.append(voters[value][3])\n\n    print(delete_id)\n\n    for value in delete_id:\n        del voters[int(value)]\n\n    print(\"test\")\n\n    contenders_file = open(\"data/contenders/%s\"%(vote_theme[0]), \"a\")\n    contenders_file.write(\"%s, %s, %s, %s, 'removed'; \" %(str(dict_for_voting[member.id][0]), str(dict_for_voting[member.id][1]), 0, str(dict_for_voting[member.id][3])))\n    contenders_file.close()\n\n    await ctx.send('The contender %s by %s has been removed'%(dict_for_voting[member.id][0] ,dict_for_voting[member.id][1]))\n\n    del dict_for_voting[member.id]\n\n    print(dict_for_voting)\n    print(voters)\n\n\n#joins both contenders into a single contender called in by the author--------------------------------------------------\n@client.command()\n@commands.has_role('Bartender')\nasync def join(ctx, member:discord.Member, member2:discord.Member):\n    global dict_for_voting, voters\n    await ctx.send(\"Joining %s with %s.\"%(dict_for_voting[int(member.id)][0], dict_for_voting[int(member2.id)][0]))\n    votes_to_add = 0\n    votes_to_add += dict_for_voting[int(member2.id)][2]\n    for value in voters:\n        if voters[value][2] == dict_for_voting[int(member2.id)][3]:\n            voters[value][2] = dict_for_voting[int(member.id)][3]\n            voters[value][0] = dict_for_voting[int(member.id)][0]\n    dict_for_voting[int(member.id)][2] += votes_to_add\n    print(dict_for_voting, voters)\n\n    # Write the absorbed contender's closing record before removing it from the dict.\n    contenders_file = open(\"data/contenders/%s\"%(vote_theme[0]), \"a\")\n    contenders_file.write(\"%s, %s, %s, %s, 'removed'; \" %(str(dict_for_voting[member2.id][0]), str(dict_for_voting[member2.id][1]), 0, str(dict_for_voting[member2.id][3])))\n    contenders_file.close()\n    del dict_for_voting[int(member2.id)]\n\n    voters_file = open(\"data/voters/%s\"%(vote_theme[0]), \"a\")\n    voters_file.write(\"%s, %s, %s, %s; \" %(dict_for_voting[int(member.id)][0], dict_for_voting[int(member.id)][1], dict_for_voting[int(member.id)][2], dict_for_voting[int(member.id)][3]))\n    voters_file.close()\n\n\n# Decides the winner --------------------------------------------------------------------------------------------------------\n@client.command(aliases=['finish'])\n@commands.has_role('Bartender')\nasync def decide_winner(ctx):\n    global vote_start, separated_vote, fase_2\n    if vote_start == True:\n        if separated_vote == True and fase_2 == False:\n            await ctx.send(\"You are still in the adding phase, please use b:separate to change phases\")\n        \n        else:\n            # Rank contenders by vote count (element 2 of each record), highest first.\n            dict_for_voting_sorted = sorted(dict_for_voting.items(), key=lambda x: x[1][2], reverse=True)\n            \n            print(dict_for_voting_sorted)\n            await 
ctx.send(\"test\")\n\n embed = discord.Embed(\n colour = discord.Colour.blue()\n )\n embed.set_author(name=\"These are the top #3\")\n msg = [\n (\"#1 %s\"%(dict_for_voting_sorted[0][1][0]), \"by <@%s> with %s votes\"%(dict_for_voting_sorted[0][1][3], dict_for_voting_sorted[0][1][2]), False),\n (\"#2 %s\"%(dict_for_voting_sorted[1][1][0]), \"by <@%s> with %s votes\"%(dict_for_voting_sorted[1][1][3], dict_for_voting_sorted[1][1][2]), False),\n (\"#3 %s\"%(dict_for_voting_sorted[2][1][0]), \"by <@%s> with %s votes\"%(dict_for_voting_sorted[2][1][3], dict_for_voting_sorted[2][1][2]), False),\n ]\n for name, value, inline in msg:\n embed.add_field(name=name, value=value, inline=inline)\n\n await ctx.send(embed=embed)\n\n vote_start = False\n separated_vote = False\n fase_2 = False\n \n \n else:\n await ctx.send(\"There is no voting session in progress\")\n\n\n# Clears dict_for_voting -------------------------------------------------------------------------------------------------------------------\n@client.command()\n@commands.has_role('Bartender')\nasync def clear(ctx):\n global dict_for_voting, vote_theme\n dict_for_voting.clear()\n voters.clear()\n vote_theme.clear()\n\n print(vote_theme)\n await ctx.send(\"The voting list has been cleared.\")\n\n\n@client.command(aliases=['continue'])\n@commands.has_role('Bartender')\nasync def _continue(ctx, arg):\n global continue_vote, yes_votes, no_votes, winner, continue_voters\n if arg.lower() == \"start\":\n if continue_vote == True:\n await ctx.send(\"A voting session to continue has already been started.\")\n\n elif continue_vote == False:\n await ctx.send(\"A voting session to continue has been started. Use b:yes or b:no to vote.\")\n continue_vote = True\n\n elif arg.lower() == \"stop\":\n if continue_vote == False:\n await ctx.send(\"No voting session to continue has been started.\")\n\n elif continue_vote == True:\n continue_vote = False\n\n update_winner()\n\n winner_sorted = sorted(winner.items(), key=lambda x: x[1], reverse=True)\n\n print(winner_sorted)\n\n await ctx.send(\"A voting session to continue has been ended.\")\n\n\n embed = discord.Embed(\n colour = discord.Colour.red()\n )\n embed.set_author(name=\"Continue voting results:\")\n\n msg = [\n (\"#1 %s\"%(winner_sorted[0][0]), \"with: %s votes\"%(winner_sorted[0][1][0]), False),\n (\"#2 %s\"%(winner_sorted[1][0]), \"with: %s votes\"%(winner_sorted[1][1][0]), False),\n ]\n\n for name, value, inline in msg:\n embed.add_field(name=name, value=value, inline=inline)\n\n await ctx.send(embed=embed)\n\n winner.clear()\n winner_sorted.clear()\n continue_voters.clear()\n\n yes_votes = 0\n no_votes = 0\n\n print(winner, winner_sorted, continue_voters, yes_votes, no_votes)\n\n\n@client.command()\nasync def yes(ctx):\n global continue_voters, continue_vote, yes_votes\n if continue_vote == True:\n if ctx.author.id not in continue_voters:\n if ctx.author.nick == None:\n continue_voters.update({ctx.author.id : [\"yes\", str(ctx.author.name), str(ctx.author.id)]})\n yes_votes += 1\n else:\n continue_voters.update({ctx.author.id : [\"yes\", str(ctx.author.nick), str(ctx.author.id)]})\n yes_votes += 1\n\n print(yes_votes, continue_voters)\n embed_1 = discord.Embed(\n colour = discord.Colour.blue()\n )\n\n embed_1.set_author(name=\"%s you voted YES\"%(continue_voters[ctx.author.id][1]))\n await ctx.send(embed=embed_1)\n\n update_winner()\n\n winner_sorted = sorted(winner.items(), key=lambda x: x[1], reverse=True)\n\n embed = discord.Embed(\n colour = discord.Colour.red()\n )\n 
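# winner_sorted holds (choice, [votes]) pairs in descending vote order for the results embed.\n            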
embed.set_author(name=\"Continue voting results:\")\n\n msg = [\n (\"#1 %s\"%(winner_sorted[0][0]), \"with: %s votes\"%(winner_sorted[0][1][0]), False),\n (\"#2 %s\"%(winner_sorted[1][0]), \"with: %s votes\"%(winner_sorted[1][1][0]), False),\n ]\n\n for name, value, inline in msg:\n embed.add_field(name=name, value=value, inline=inline)\n\n await ctx.send(embed=embed)\n\n else:\n await ctx.send(\"You have already voted.\")\n\n else:\n await ctx.send(\"There is no voting session to continue in progress.\")\n\n\n@client.command()\nasync def no(ctx):\n global continue_voters, continue_vote, no_votes\n if continue_vote == True:\n if ctx.author.id not in continue_voters:\n if ctx.author.nick == None:\n continue_voters.update({ctx.author.id : [\"no\", str(ctx.author.name), str(ctx.author.id)]})\n no_votes += 1\n else:\n continue_voters.update({ctx.author.id : [\"no\", str(ctx.author.nick), str(ctx.author.id)]})\n no_votes += 1\n\n print(yes_votes, continue_voters)\n embed_1 = discord.Embed(\n colour = discord.Colour.blue()\n )\n\n embed_1.set_author(name=\"%s you voted NO\"%(continue_voters[ctx.author.id][1]))\n await ctx.send(embed=embed_1)\n\n update_winner()\n\n winner_sorted = sorted(winner.items(), key=lambda x: x[1], reverse=True)\n\n embed = discord.Embed(\n colour = discord.Colour.red()\n )\n embed.set_author(name=\"Continue voting results:\")\n\n msg = [\n (\"#1 %s\"%(winner_sorted[0][0]), \"with: %s votes\"%(winner_sorted[0][1][0]), False),\n (\"#2 %s\"%(winner_sorted[1][0]), \"with: %s votes\"%(winner_sorted[1][1][0]), False),\n ]\n\n for name, value, inline in msg:\n embed.add_field(name=name, value=value, inline=inline)\n\n await ctx.send(embed=embed)\n\n else:\n await ctx.send(\"You have already voted.\")\n\n else:\n await ctx.send(\"There is no voting session to continue in progress.\")\n\n\n@client.command(aliases=['removecontinue', 'delcontinue'])\n@commands.has_role('Bartender')\nasync def remove_continue(ctx):\n global continue_voters, yes_votes, no_votes\n if continue_vote == True:\n if ctx.author.id in continue_voters:\n print(continue_voters)\n\n if continue_voters[ctx.author.id][0] == \"yes\":\n yes_votes -= 1\n elif continue_voters[ctx.author.id][0] == \"no\":\n no_votes -= 1\n \n del continue_voters[ctx.author.id]\n \n print(continue_voters)\n\n await ctx.send(\"Your continue vote has been removed\")\n\n update_winner()\n\n winner_sorted = sorted(winner.items(), key=lambda x: x[1], reverse=True)\n\n embed = discord.Embed(\n colour = discord.Colour.red()\n )\n embed.set_author(name=\"Continue voting results:\")\n\n msg = [\n (\"#1 %s\"%(winner_sorted[0][0]), \"with: %s votes\"%(winner_sorted[0][1][0]), False),\n (\"#2 %s\"%(winner_sorted[1][0]), \"with: %s votes\"%(winner_sorted[1][1][0]), False),\n ]\n\n for name, value, inline in msg:\n embed.add_field(name=name, value=value, inline=inline)\n\n await ctx.send(embed=embed)\n \n else:\n await ctx.send(\"You haven't voted yet.\")\n else:\n await ctx.send(\"There is no voting session to continue in progress.\")\n\n\n@client.command()\n@commands.has_role('Bartender')\nasync def recover(ctx):\n pass\n\n\n# prints all the commands and their functions ----------------------------------------------------------------------------------\n@client.command()\nasync def help(ctx):\n embed = discord.Embed(\n colour = discord.Colour.red()\n )\n embed.set_author(name=\"HELP\")\n msg = [\n (\"-SÓ PARA MODS\", \"-------------------------\", False),\n (\"start off (theme)\", \"Começa uma votação (SEM FASES)\", False),\n (\"start on 
(theme)\", 'Começa uma votação (COM FASES)', False),\n (\"stop\", \"Para a votação em andamento\", False),\n (\"rc, removecontender\", \"Remove o competidor\", False),\n (\"join\", \"Junta os competidores e seus votos usando a @ de quem os cadastrou\", False),\n (\"finish\", \"Termina a votação em andamento e define um ganhador\", False),\n (\"clear\", \"Limpa a votelist\", False),\n (\"continue start\", \"Começa uma votação para continuar\", False),\n (\"continue stop\", \"Termina a votação para continuar e mostra os resultados(IRÁ LIMPAR OS DADOS APÓS ENCERRAR!)\", False),\n (\"-CLIENTES\", \"-------------------------\", False),\n (\"add\", \"Adiciona o seu competidor para votação\", False),\n (\"change\", \"Troca a fase da votação\", False),\n (\"votelist\", \"Mostra os competidores e sua contagem de votos\", False),\n (\"vote\", \"Vota usando o nome do competidor(NÃO PRECISA DE CAPS)\", False),\n (\"votename\", \"Vota usando @ de quem o cadastrou\", False),\n (\"remove\", \"Remove o seu voto caso votou para o competidor errado\", False),\n (\"yes\", \"Vota SIM para continuar\", False),\n (\"no\", \"Vote NÃO para continuar\", False),\n (\"removecontinue, delcontinue\", \"Remove o seu voto para continuar caso votou errado\", False),\n ]\n for name, value, inline in msg:\n embed.add_field(name=name, value=value, inline=inline)\n await ctx.send(embed=embed)\n\n\n@client.command()\n@commands.has_role('Bartender')\nasync def test(ctx, member:discord.Member):\n member_id = (\"<@%s>\"%(member.id))\n await ctx.send(member_id)\n embed = discord.Embed(\n colour = discord.Colour.blue()\n )\n\n embed.set_author(name=member_id)\n await ctx.send(embed=embed)\n\n\nclient.run('*****')\n","repo_name":"ruflees/bot-kun-restart","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":23997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7731095858","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport json\nimport os\nfrom random import shuffle\n\nq_repo = \"query\"\npath_en_ccks_covid19 = os.path.join(q_repo, \"en_ccks_covid19.json\")\n\ndef load_query_map(query_map_path):\n with open(query_map_path, \"r\", encoding='utf-8') as f:\n query_map = json.load(f)\n return query_map\n\nquery_en_ccks_covid19 = load_query_map(path_en_ccks_covid19)\n\nqueries_for_dataset = {\n \"en_ccks_covid19\": query_en_ccks_covid19\n}\n\n\ndef generate_flat_ner_dta():\n\n dataset_name = \"en_ccks_covid19\"\n query_sign = \"default\"\n\n source_file_path = os.path.join(\"origin_data\", \"train.json\")\n # source_data = load_conll(train, source_file_path)\n with open(source_file_path, \"r\", encoding='utf-8') as f:\n source_data = json.load(f)\n\n shuffle(source_data)\n sent_len = int((len(source_data) / 5) * 4)\n train_source_data = source_data[:sent_len]\n dev_source_data = source_data[sent_len:]\n\n target_file_path = os.path.join(\"data\", \"mrc-ner.train\")\n generate_query_ner_dataset_(True, target_file_path, train_source_data, dataset_name=dataset_name,\n query_sign=query_sign)\n\n target_file_path = os.path.join(\"data\", \"mrc-ner.dev\")\n generate_query_ner_dataset_(True, target_file_path, dev_source_data, dataset_name=dataset_name,\n query_sign=query_sign)\n\n\ndef generate_query_ner_dataset_(train, dump_file_path, source_data,\n dataset_name=None, query_sign=\"default\"):\n \"\"\"\n Args:\n source_data_file: /data/genia/train.word.json | /data/msra/train.char.bmes\n dump_data_file: /data/genia-mrc/train.mrc.json | 
/data/msra-mrc/train.mrc.json\n dataset_name: one in [\"en_ontonotes5\", \"en_conll03\", ]\n entity_sign: one of [\"nested\", \"flat\"]\n query_sign: defualt is \"default\"\n Desc:\n pass\n \"\"\"\n entity_queries = queries_for_dataset[dataset_name][query_sign]\n label_lst = queries_for_dataset[dataset_name][\"labels\"]\n\n target_data = transform_examples_to_qa_features(entity_queries, label_lst, source_data)\n\n with open(dump_file_path, \"w\", encoding='utf-8') as f:\n json.dump(target_data, f, sort_keys=True, ensure_ascii=False, indent=2)\n\n\ndef transform_examples_to_qa_features(query_map, entity_labels, data_instances):\n \"\"\"\n Desc:\n convert_examples to qa features\n Args:\n query_map: {entity label: entity query};\n data_instance\n \"\"\"\n mrc_ner_dataset = []\n tmp_qas_id = 0\n for idx, data_item in enumerate(data_instances):\n tmp_query_id = 0\n for label_idx, tmp_label in enumerate(entity_labels):\n tmp_query_id += 1\n tmp_query = query_map[tmp_label]\n tmp_context = data_item[\"context\"]\n\n tmp_start_pos = []\n tmp_end_pos = []\n tmp_entity_pos = []\n\n start_end_label = data_item[\"label\"][tmp_label] if tmp_label in data_item[\"label\"].keys() else -1\n\n if start_end_label == -1:\n tmp_impossible = True\n else:\n for start_end_item in data_item[\"label\"][tmp_label]:\n start_end_item = start_end_item.replace(\",\", \";\")\n start_idx, end_idx = [int(ix) for ix in start_end_item.split(\";\")]\n tmp_start_pos.append(start_idx)\n tmp_end_pos.append(end_idx)\n tmp_entity_pos.append(start_end_item)\n tmp_impossible = False\n\n mrc_ner_dataset.append({\n \"qas_id\": \"{}.{}\".format(str(tmp_qas_id), str(tmp_query_id)),\n \"query\": tmp_query,\n \"context\": tmp_context,\n \"entity_label\": tmp_label,\n \"start_position\": tmp_start_pos,\n \"end_position\": tmp_end_pos,\n \"span_position\": tmp_entity_pos,\n \"impossible\": tmp_impossible\n })\n tmp_qas_id += 1\n\n return mrc_ner_dataset\n\n\n\n\n\n","repo_name":"JavaStudenttwo/BERT_MRC","sub_path":"run/generate_train_dev.py","file_name":"generate_train_dev.py","file_ext":"py","file_size_in_byte":3977,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"37"} +{"seq_id":"8630440865","text":"import json\nimport web\nfrom . import db, anonymous_report_responses\nfrom settings import USE_OLD_WEBHOOKS\nfrom app.tools.utils import get_webhook_msg_old, get_webhook_msg\nfrom app.tools.utils import queue_anonymous_report\nfrom tasks import sendsms_to_uuids_task\n\n\nclass AnonymousReports:\n def POST(self):\n params = web.input(\n contact_uuid=\"\", msg=\"\")\n contact_uuid = params.contact_uuid\n msg = params.msg\n # if USE_OLD_WEBHOOKS:\n # msg = get_webhook_msg_old(params, 'msg')\n # else:\n # payload = json.loads(web.data())\n # msg = get_webhook_msg(payload, 'msg')\n print(contact_uuid + \"=> \" + msg)\n extra_params = {'contact_uuid': contact_uuid, 'report': msg}\n report_id, has_report = queue_anonymous_report(db, extra_params)\n if has_report:\n resp_msg = \"Thank you for your consistent feedback about this health facility.\"\n else:\n resp_msg = (\n \"Your report has been sent to relevant authorities. You can also call Ministry \"\n \"of Health on 0800100066 (toll free) for further help and inquires. 
\"\n            \"If this is an emergency contact your nearest facility\")\n\n        return json.dumps({'message': resp_msg})\n\n\nclass AnonymousReportDetails:\n    def GET(self, report_id):\n        htmlStr = ''\n        htmlStr += \"<table><tr><th>Field</th><th>Value</th></tr>\"\n        rs = db.query(\n            \"SELECT id, facility, district, created, report, action, topic, \"\n            \"action_taken, action_center, comment FROM anonymousreports_view \"\n            \"WHERE id = $id\", {'id': report_id})\n        if rs:\n            rpt = rs[0]\n            htmlStr += \"<tr><td>Facility</td><td>%s</td></tr>\" % rpt['facility']\n            htmlStr += \"<tr><td>District</td><td>%s</td></tr>\" % rpt['district']\n            htmlStr += \"<tr><td>Date</td><td>%s</td></tr>\" % rpt['created']\n            htmlStr += \"<tr><td>Reports</td><td>%s</td></tr>\" % rpt['report']\n            htmlStr += \"<tr><td>Topic</td><td>%s</td></tr>\" % rpt['topic']\n            htmlStr += \"<tr><td>Action</td><td>%s</td></tr>\" % rpt['action']\n            htmlStr += \"<tr><td>Action Center</td><td>%s</td></tr>\" % rpt['action_center']\n            htmlStr += \"<tr><td>Comments</td><td>%s</td></tr>\" % rpt['comment']\n            htmlStr += \"<tr><td>Action Taken</td><td>%s</td></tr>\" % rpt['action_taken']\n            htmlStr += \"<tr><td>Responses</td><td>%s</td></tr>\" % anonymous_report_responses(rpt['id'])\n\n        htmlStr += \"</table>
\"\n return htmlStr\n\n\nclass AnonReport:\n def GET(self, report_id):\n web.header(\"Content-Type\", \"application/json; charset=utf-8\")\n rs = db.query(\n \"SELECT id, facility, district, to_char(created, 'YYYY-MM-DD HH:MI:SS') AS created,\"\n \"report, action, topic, action_taken, action_center, comment, \"\n \"districtid, facilityid FROM anonymousreports_view \"\n \"WHERE id = $id\", {'id': report_id})\n if rs:\n rpt = rs[0]\n\n report = {\n 'id': rpt['id'], 'facility': rpt['facility'], 'district': rpt['district'],\n 'created': rpt['created'], 'report': rpt['report'], 'action': rpt['action'],\n 'topic': rpt['topic'], 'action_taken': rpt['action_taken'],\n 'action_center': rpt['action_center'], 'comment': rpt['comment'],\n 'facilityid': rpt['facilityid'], 'districtid': rpt['districtid'],\n 'responses': anonymous_report_responses(rpt['id'])\n }\n return json.dumps(report)\n return json.dumps({})\n\n def POST(self, report_id):\n web.header(\"Content-Type\", \"application/json; charset=utf-8\")\n params = web.input()\n rs = db.query(\n \"SELECT contact_uuid, action_taken FROM anonymousreports WHERE id = $id\",\n {'id': report_id})\n if rs:\n rpt = rs[0]\n\n db.query(\n \"UPDATE anonymousreports SET (facilityid, districtid, action, action_center, \"\n \"topic, action_taken) = ($facility, $district, $action, $action_center, \"\n \"$topic, $action_taken) \"\n \" WHERE id = $id\", {\n 'id': report_id, 'facility': params.facility if params.facility else None,\n 'district': params.district if params.district else None,\n 'action': params.action,\n 'action_center': params.action_center, 'topic': params.topic,\n 'action_taken': params.action_taken})\n\n if params.action_taken and (rpt['action_taken'] != params.action_taken):\n # send action taken to use\n sendsms_to_uuids_task.delay([rpt['contact_uuid']], params.action_taken)\n db.query(\n \"INSERT INTO anonymousreport_messages (report_id, message, direction) \"\n \"VALUES($report_id, $msg, 'O') \", {'report_id': report_id, 'msg': params.action_taken})\n\n return json.dumps({\"message\": \"saved successfully.\", \"status\": \"success\"})\n","repo_name":"gcinnovate/mtracpro","sub_path":"web/app/controllers/api7.py","file_name":"api7.py","file_ext":"py","file_size_in_byte":5226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"14214162568","text":"from django.shortcuts import render\nfrom rest_framework.mixins import ListModelMixin,CreateModelMixin,DestroyModelMixin,\\\n UpdateModelMixin,RetrieveModelMixin\nfrom rest_framework.viewsets import GenericViewSet,ModelViewSet\nfrom rest_framework_jwt.authentication import JSONWebTokenAuthentication\nfrom rest_framework.authentication import SessionAuthentication\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom tools.permission import IsOwnerOrReadOnly\nfrom .models import ShoppingCart,OrderInfo,OrderGoods\nfrom .serializer import ShoppingCartSerializer,ShoppingCartDetailSerializer,OrderSerializer\n# Create your views here.\n\nclass ShoppingCartViewSet(ModelViewSet):\n queryset = ShoppingCart.objects.all()\n serializer_class = ShoppingCartSerializer\n lookup_field = 'goods_id'\n\n permission_classes = [\n IsAuthenticated,\n IsOwnerOrReadOnly,\n ]\n authentication_classes = [\n SessionAuthentication,\n JSONWebTokenAuthentication,\n ]\n def get_serializer_class(self):\n if self.action == 'list':\n return ShoppingCartDetailSerializer\n return 
ShoppingCartSerializer\n\n def get_queryset(self):\n return ShoppingCart.objects.filter(\n user=self.request.user.id\n )\n def destroy(self, request, *args, **kwargs):\n goods_id = kwargs.get('goods_id')\n if goods_id:\n return super(ShoppingCartViewSet, self).destroy(request,*args,**kwargs)\n else:\n queryset = self.get_queryset()\n for i in queryset:\n self.perform_destroy(i)\n return Response(status=status.HTTP_204_NO_CONTENT)\n\nclass OrderViewset(ListModelMixin,CreateModelMixin,DestroyModelMixin,RetrieveModelMixin,GenericViewSet):\n # 权限设置\n permission_classes = (IsAuthenticated, IsOwnerOrReadOnly)\n # 认证设置\n authentication_classes = (JSONWebTokenAuthentication, SessionAuthentication)\n\n serializer_class = OrderSerializer\n\n def get_queryset(self):\n return OrderInfo.objects.filter(user=self.request.user)\n\n def perform_create(self, serializer):\n order = serializer.save()\n shop_carts = ShoppingCart.objects.filter(user=self.request.user)\n for shop_cart in shop_carts:\n #获取购物车中的所有商品信息保存在订单商品中\n order_goods = OrderGoods()\n order_goods.goods = shop_cart.goods\n order_goods.goods_num = shop_cart.nums\n order_goods.order = order\n order_goods.save()\n shop_cart.delete()\n # return order\nfrom rest_framework.views import APIView\nfrom tools.alipay import AliPay\nfrom lnk.settings import APPID,PUBLIC_KEY, PRIVATE_KEY,TEXT_URL\nfrom rest_framework.response import Response\nfrom datetime import datetime\nfrom django.shortcuts import redirect\n\nclass AlipayView(APIView):\n def get(self, request):\n \"\"\"\n 处理支付宝的return_url返回\n :param request:\n :return:\n \"\"\"\n processed_dict = {}\n for key, value in request.GET.items():\n processed_dict[key] = value\n\n sign = processed_dict.pop(\"sign\", None)\n\n print('======',processed_dict)\n\n alipay = AliPay(\n appid=APPID,\n app_notify_url=\"http://127.0.0.1:8000/alipay/return/\",\n app_private_key_path=PRIVATE_KEY,\n alipay_public_key_path=PUBLIC_KEY, # 支付宝的公钥,验证支付宝回传消息使用,不是你自己的公钥,\n debug=True, # 默认False,\n return_url=\"http://127.0.0.1:8000/alipay/return/\"\n )\n #根据成功返回的参数和签名是否一致\n verify_re = alipay.verify(processed_dict, sign)\n\n if verify_re is True:\n #获取订单号\n order_sn = processed_dict.get('out_trade_no', None)\n trade_no = processed_dict.get('trade_no', None)\n trade_status = processed_dict.get('trade_status', 'TRADE_SUCCESS')\n #更新订单支付信息\n existed_orders = OrderInfo.objects.filter(order_sn=order_sn)\n for existed_order in existed_orders:\n existed_order.pay_status = trade_status\n existed_order.trade_no = trade_no\n existed_order.pay_time = datetime.now()\n existed_order.save()\n\n response = redirect(\"http://127.0.0.1:8080/#/app/home/index\")\n response.set_cookie(\"nextPath\",\"pay\", max_age=3)\n return response\n else:\n response = redirect(\"http://127.0.0.1:8080/#/app/home/index\")\n return response\n\n def post(self, request):\n \"\"\"\n 处理支付宝的notify_url\n :param request:\n :return:\n \"\"\"\n processed_dict = {}\n for key, value in request.POST.items():\n processed_dict[key] = value\n\n sign = processed_dict.pop(\"sign\", None)\n\n alipay = AliPay(\n appid=APPID,\n app_notify_url=\"http://127.0.0.1:8000/alipay/return/\",\n app_private_key_path=PRIVATE_KEY,\n alipay_public_key_path=PUBLIC_KEY, # 支付宝的公钥,验证支付宝回传消息使用,不是你自己的公钥,\n debug=True, # 默认False,\n return_url=\"http://127.0.0.1:8000/alipay/return/\"\n )\n\n verify_re = alipay.verify(processed_dict, sign)\n\n if verify_re is True:\n order_sn = processed_dict.get('out_trade_no', None)\n trade_no = processed_dict.get('trade_no', None)\n trade_status = 
processed_dict.get('trade_status', 'TRADE_SUCCESS')\n\n existed_orders = OrderInfo.objects.filter(order_sn=order_sn)\n for existed_order in existed_orders:\n order_goods = existed_order.goods.all()\n for order_good in order_goods:\n goods = order_good.goods\n goods.sold_num += order_good.goods_num\n goods.save()\n\n existed_order.pay_status = trade_status\n existed_order.trade_no = trade_no\n existed_order.pay_time = datetime.now()\n existed_order.save()\n\n return Response(\"success\")\n","repo_name":"Carneyyy/django","sub_path":"apps/trade/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39793265030","text":"import datetime\r\n\r\nfrom exceptions import ValidateError\r\n\r\nclass Data:\r\n def __init__(self, name: str, age: str):\r\n self.name = name\r\n self.age = age\r\n self._clear_whitespaces()\r\n self.age = int(self.age)\r\n\r\n def _clear_whitespaces(self):\r\n self.name = self.name.strip()\r\n self.age = self.age.strip()\r\n\r\n\r\nclass DataWithDate(Data):\r\n def __init__(self, name: str, age: str):\r\n super().__init__(name, age)\r\n self.clock = datetime.datetime.utcnow()\r\n\r\n\r\nclass Validator:\r\n def __init__(self):\r\n self.data_history: list[Data] = []\r\n\r\n def validate(self, data: Data):\r\n self.data_history.append(data)\r\n self._validate_name()\r\n self._validate_age()\r\n\r\n def _validate_name(self):\r\n\r\n if not self.data_history:\r\n raise ValidateError('Ошибка: Нет данных.\\n')\r\n\r\n name = self.data_history[-1].name\r\n\r\n if not name:\r\n raise ValidateError('Ошибка: Вы не ввели имя.\\n')\r\n\r\n elif len(name) < 3:\r\n raise ValidateError('Ошибка: Минимальная длина имени - 3 символа.\\n')\r\n\r\n elif name.count(' ') > 1:\r\n raise ValidateError('Ошибка: Максимальное количество ��робелов - 1 символ.\\n')\r\n\r\n def _validate_age(self):\r\n\r\n if not self.data_history:\r\n raise ValueError('Неверный тип данных\\n')\r\n\r\n age = self.data_history[-1].age\r\n\r\n # print(type(age))\r\n\r\n if age <= 0:\r\n raise ValidateError('Ошибка: Вам не может быть 0 лет или меньше.\\n')\r\n\r\n elif age < 14:\r\n raise ValidateError('Ошибка: Программой запрещено пользоваться, если вам меньше 14 лет.\\n')\r\n\r\n","repo_name":"AZemski/homework","sub_path":"validator.py","file_name":"validator.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19579090495","text":"import openai\nfrom flask import Flask, request, render_template, redirect\n\nserver = Flask(__name__)\n\nopenai.api_key = 'xxxxx'\n\n\ndef get_completion(question):\n try:\n response = openai.Completion.create(\n model=\"text-davinci-003\",\n prompt=f\"{question}\\n\",\n temperature=0.9,\n max_tokens=2048,\n top_p=1,\n frequency_penalty=0,\n presence_penalty=0.6,\n stop=None\n )\n except Exception as e:\n\n print(e)\n return e\n return response[\"choices\"][0].text\n\n\n@server.route('/chat', methods=['GET', 'POST'])\ndef get_request_json():\n if request.method == 'POST':\n if len(request.form['question']) < 1:\n return render_template(\n 'chat.html', question=\"null\", res=\"问题不能为空\")\n question = request.form['question']\n print(\"======================================\")\n print(\"接到请求:\", question)\n res = get_completion(question)\n print(\"问题:\\n\", question)\n print(\"答案:\\n\", res)\n\n return render_template('chat.html', question=question, res=str(res))\n 
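# NOTE (added): \"text-davinci-003\" and openai.Completion are legacy; with the chat API the equivalent call (assuming openai<1.0) is roughly:\n    #   response = openai.ChatCompletion.create(\n    #       model=\"gpt-3.5-turbo\",\n    #       messages=[{\"role\": \"user\", \"content\": question}])\n    #   answer = response[\"choices\"][0][\"message\"][\"content\"]\n    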
return render_template('chat.html', question=0)\n\n\nif __name__ == '__main__':\n server.run(debug=True, host='0.0.0.0', port=80)\n","repo_name":"AlliotTech/chatgpt-web","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"37"} +{"seq_id":"70851479467","text":"from ast import FormattedValue\nfrom unittest import result\nimport streamlit as st\nimport pandas as pd\nimport requests, os\nfrom datetime import datetime\nfrom forex_python.converter import CurrencyRates\n\nnow = datetime.now()\nst.sidebar.title( \" 현 시각 달러환율\" )\nst.sidebar.write( \"Date/Time:\", now )\nCR = CurrencyRates()\nresult = CR.convert('USD', 'KRW', 1 )\nFormattedValue = \"{:.5f}\".format(result) \nvalue = float(FormattedValue)\nst.sidebar.write( \" 1 $ = \", value)\n\nst.header('🍉 Realtime Exchange Rates')\nnew_sub_1 = '
<h3>1) Fixer API 사용</h3>
'\nst.markdown(new_sub_1, unsafe_allow_html=True)\n\ncurrency_list_1 = ['USD','KRW','AUD', 'BGN', 'BRL', 'CAD', 'CHF', 'CNY', 'CZK', 'DKK', 'GBP', 'HKD', 'HRK', 'HUF', 'IDR', 'ILS', 'INR', 'ISK', 'JPY', 'MXN', 'MYR', 'NOK', 'NZD', 'PHP', 'PLN', 'RON', 'RUB', 'SEK', 'SGD', 'THB', 'TRY', 'ZAR']\ncurrency_list_2 = ['EUR', 'USD','AUD', 'KRW', 'BGN', 'BRL', 'CAD', 'CHF', 'CNY', 'KRW', 'CZK', 'DKK', 'GBP', 'HKD', 'HRK', 'HUF', 'IDR', 'INR', 'ISK', 'JPY', 'MXN', 'MYR', 'NOK', 'NZD', 'PHP', 'PLN', 'RON', 'SEK', 'SGD', 'THB', 'TRY', 'ZAR'] #'ILS', 'RUB',\n\nbase_cur = st.selectbox('- Select base currency for conversion', currency_list_1)\ntarget_currency = st.multiselect(' - Select target currency to convert ',currency_list_2, default=['JPY','PHP'])\ntitle = [ 'Base_Currency', 'Target_Currency', 'Price', 'Conversion_Date' ] # bracket's meaning of importtance ?? \n\n@st.cache\ndef load_data():\n df = pd.DataFrame( columns = title )\n print(target_currency)\n for i,name in enumerate(target_currency):\n print(\"currencies:\",name)\n url = ''.join(['https://api.apilayer.com/exchangerates_data/convert?to=',name,'&from=', base_cur,'&amount=','1'])\n payload = {}\n headers= { \"apikey\": \"ifAw0sv2rpyoZTCrFe2kKI1i9Z4wURv3\" } # 최신\n response = requests.request(\"GET\", url, headers=headers, data = payload)\n status_code = response.status_code\n result = response.text\n data = response.json()\n print(data)\n cc = [ ( data['query']['from'],data['query']['to'],data['info']['rate'],data['date'] )]\n print ( \"cc:\", cc)\n dfnew = pd.DataFrame( cc, columns = title )\n df = df.append( pd.DataFrame( dfnew, columns = title) )\n return df\n\nqq=load_data()\nst.text ('-Completion of Currency conversion like below !! ')\nst.write ( qq )\n\n#======2nd step ====================================\n\nst.markdown(\"---------------\")\nst.markdown(\"\")\n\nnew_sub_2 = '
<h3>2) Python API 사용</h3>
'\nst.markdown(new_sub_2, unsafe_allow_html=True)\n\n# error in presence of @st.cache\ntitle_2 = ['Currencies', 'Rates']\ndg = pd.DataFrame( columns = title_2 ) # reserve\n\ndef currency_all():\n\n    col1, col2, col3 = st.columns(3)\n    CR = CurrencyRates()\n    for i,j in enumerate(currency_list_2):\n        Result = CR.convert(base_cur, j, 1)\n        colname = str(base_cur + '/' + j)\n        FormattedValue = \"{:.5f}\".format(Result)\n        value = float(FormattedValue)\n\n        if i < 11:\n            with col1:\n                st.write(colname, value)\n\n        elif (i > 10) and (i < 21):\n            with col2:\n                st.write(colname, value)\n\n        elif (i > 20):\n            with col3:\n                st.write(colname, value)\n\n        # dgnew = pd.DataFrame(value, columns=title_2, axis=1 )\n        # dg = dg.append ( pd.DataFrame(dgnew, columns = colname ) )\n        # # dg = pd.DataFrame(dgnew, columns = colname )\n        # return dg\n\n# rr = currency_all()\n\n# rr.to_excel('Currency_Rate from Forex API')\ncurrency_all()\n# st.write ( \" ( Caution: 'THB', 'RUB' currency rate is not available )\")\nopt = st.radio( label = '- Select option for retrieving the data', options = ['Go','Stop(Current Data Save)'] )\nst.write('', unsafe_allow_html = True)\n\n","repo_name":"ujin725/exchange_1","sub_path":"pages/01_RealtimeRate.py","file_name":"01_RealtimeRate.py","file_ext":"py","file_size_in_byte":3974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"22721769140","text":"#!/usr/bin/python3\nimport random\nnumber = random.randint(-10000, 10000)\nif number < 0:\n    lastd = number % -10\nelse:\n    lastd = number % 10\nprint('Last digit of {} is '.format(number), end=\"\")\nif lastd > 5:\n    print('{} and is greater than 5'.format(lastd))\nelif lastd == 0:\n    print('{} and is 0'.format(lastd))\nelif lastd < 6 and lastd != 0:\n    print('{} and is less than 6 and not 0'.format(lastd))\n","repo_name":"aspiringsoftwaredeveloper/alx-higher_level_programming","sub_path":"0x01-python-if_else_loops_functions/1-last_digit.py","file_name":"1-last_digit.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"72695186667","text":"import numpy as np\n######### tfp\nimport tensorflow as tf\n# tf.enable_v2_behavior()\nimport tensorflow_probability as tfp\ntfd = tfp.distributions\ntfb = tfp.bijectors\n\nfrom scipy.special import log_softmax, softmax\n\nclass GLASS:\n    def __init__(self, shrinkage_factor=0, dtype = tf.float32) -> None:\n        self.shrinkage_factor, self.dtype = shrinkage_factor, dtype\n\n    def fit(self, X: np.ndarray, y: np.ndarray, num_steps=10000, sample_size=10, learning_rate=0.001):\n        self.X_train = tf.constant(X, dtype=self.dtype)\n        self.y_train = tf.constant(y, dtype=self.dtype)\n        self.n, self.K, self.n_channel, self.n_Tz = self.X_train.shape\n        self.n_corr = round(self.n_channel*(self.n_channel-1)/2)\n        def jointmodel():\n            sigma = yield tfd.Sample(tfd.HalfCauchy(0.0, tf.cast(1, self.dtype)), self.n_channel)\n            # covmat\n            covmat_chol = yield tfd.LKJ(dimension=self.n_channel, concentration=2, input_output_cholesky=True)\n            # weights\n            beta = yield tfb.Cumsum()(tfd.Sample(tfd.Normal(0.0, tf.cast(1, self.dtype)), (self.n_channel, self.n_Tz)))\n            beta = sigma[:, None] * tf.linalg.matmul(covmat_chol, beta)\n            beta = tfp.math.soft_threshold(beta, self.shrinkage_factor)\n            logits = tf.linalg.tensordot(self.X_train, beta, axes = [[2, 3], [0, 1]])\n            y = yield tfd.Multinomial(logits = logits, total_count = 1)\n        self.joint = tfd.JointDistributionCoroutineAutoBatched(jointmodel)\n\n        
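# (added) Surrogate posterior for variational inference, one factor per latent of the joint model\n        # above and in the same order: a LogNormal for the positive scales sigma, a Normal pushed through\n        # tfb.CorrelationCholesky() for the LKJ correlation factor, and a mean-field Normal for the raw\n        # weights. tfp.vi.fit_surrogate_posterior below fits the tf.Variables by maximizing the ELBO\n        # against self.loglik.\n        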
self.posterior = tfd.JointDistributionSequentialAutoBatched([\n tfd.LogNormal(\n tf.Variable(tf.zeros(self.n_channel, dtype = self.dtype) - 3),\n tfp.util.TransformedVariable(0.1 * tf.ones(self.n_channel, dtype = self.dtype), bijector = tfb.Softplus())),\n tfb.CorrelationCholesky()(tfd.Independent(tfd.Normal(\n tf.Variable(0.1*tf.random.normal((self.n_corr, ), dtype = self.dtype), \n dtype = self.dtype),\n tfp.util.TransformedVariable(0.1*tf.ones(self.n_corr, dtype = self.dtype), \n bijector = tfb.Softplus())), 1)),\n tfd.Normal(\n tf.Variable(tf.random.normal((self.n_channel, self.n_Tz), stddev=3*self.shrinkage_factor, dtype = self.dtype), \n dtype = self.dtype),\n tfp.util.TransformedVariable(0.01 * tf.ones((self.n_channel, self.n_Tz), dtype = self.dtype), \n bijector = tfb.Softplus())),\n ])\n\n self.losses = []\n optimizer = tf.optimizers.Adam(learning_rate=learning_rate)\n self.losses += list(tfp.vi.fit_surrogate_posterior(\n self.loglik, \n self.posterior,\n optimizer = optimizer,\n num_steps = num_steps, \n sample_size = sample_size))\n \n self.losses = [float(x) for x in self.losses]\n self.samples = [np.array(x) for x in self.posterior.sample(10000)]\n self.betaMats = self.get_betaMats()\n self.betaMat = np.median(self.betaMats, axis=0)\n \n def loglik(self, *args):\n return self.joint.log_prob(*args, self.y_train)\n \n def get_betaMats(self):\n sigmas, corr_trils, beta_raws = self.samples\n betaMats = sigmas[:, :, None] * tf.linalg.matmul(corr_trils, beta_raws)\n return np.array(tfp.math.soft_threshold(betaMats, self.shrinkage_factor))\n \n # @property\n # def DIC(self):\n # l = self.loglik(*self.posterior.mean()).numpy()\n # samples = self.posterior.sample(10000)\n # ls = self.loglik(*samples).numpy()\n # return -2 * (2*ls.mean() - l)\n \n @property\n def corr(self):\n corr_trils = self.samples[1]\n corr_mats = np.array([corr_tril@corr_tril.T for corr_tril in corr_trils])\n return np.quantile(corr_mats, [0.05, 0.5, 0.95], axis=0)\n \n def predict_logprob(self, newX: np.ndarray):\n return np.tensordot(newX, self.betaMat, axes=[[2,3], [0,1]])","repo_name":"BangyaoZhao/GLASS","sub_path":"glass.py","file_name":"glass.py","file_ext":"py","file_size_in_byte":4048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12993519","text":"# encoding=utf-8\nimport os\nimport requests\nimport json\nproblem_name=input(\"请输入题号\\n\")\nwork_dir=\"D:\\\\OI-training\\\\\"\nif(problem_name[0]=='P'):\n\tos.chdir(work_dir+\"Luogu\\\\\"+problem_name[0:2]+\"\\\\\"+problem_name[0:3]+\"\\\\\"+problem_name+\"\\\\\")\nfile=open(problem_name+\".cpp\",\"r+\")\nparam={\"code\":file.read(),\"enableO2\":1,\"lang\":0}\nheader={\"Cookie\":\"__client_id=4488cb875a0b7b3dbbf00172abf123ce2381ab21; _uid=499156\",\"Referer\":\"https://www.luogu.com.cn/problem/\"+problem_name,\"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36 Edg/115.0.1901.203\",\"X-Csrf-Token\":\"1692662667:moEtnmWDZe+3r6VwZVdVi+tq9NsOOmsuq2m+/POSUTY=\"}\n\nresponse=requests.post(\"https://www.luogu.com.cn/fe/api/problem/submit/\"+problem_name,params=param,headers=header)\nans=json.loads(response.text)\nrid=ans[\"rid\"]\nos.system(\"start \\\"C:\\\\Program Files (x86)\\\\Microsoft\\\\Edge\\\\Application\\\\msedge.exe\\\" 
https://www.luogu.com.cn/record/\"+rid)","repo_name":"HMZ0915/OI-training","sub_path":"commit.py","file_name":"commit.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"25536994020","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import normalize\nfrom multiprocessing import Pool\nimport time\nfrom sklearn.model_selection import train_test_split\n\n# kaggle dataset, 28*28 centered letters, à concaténer avec la mnist\n\n## !! premier B à la position 13870 !! ##\n\ndef get_lettre(l):\n def gene():\n iniLettre = \"A\"\n for i in range(26):\n yield chr(ord(iniLettre) + i)\n return list(gene())[l]\n\n\ndef convertImg(img):\n tmps = \"\"\n l = []\n tmpl=[]\n j=0\n for e in img:\n if e == ',':\n tmpl += [int(tmps)]\n tmps = \"\"\n j+=1\n \n else:\n tmps += e\n\n if j == 28:\n j=0\n l += [tmpl]\n tmpl = []\n \n tmpl += [int(tmps)]\n l += [tmpl]\n l = np.array(l)\n return l\n\n\nclass CalculateChunk():\n def __init__(self, chunk, processNB):\n self.chunk = chunk\n self.processed = pd.DataFrame(columns=[\"imgNF\", 'sparse_lettre', \"lettre\"])\n self.processNB = processNB\n self.progress = 0\n\n def shift(self, x):\n self.progress += 1\n #print(\"Process N°\", self.processNB, \" Progress : \", self.progress)\n l = x.split(\",\")[0]\n img = convertImg(x[len(l)+1:])\n l = int(l)\n dic = {\"imgNF\": normalize(img, axis=1), \"sparse_lettre\": l, \"lettre\": get_lettre(l)}\n self.processed = self.processed.append(dic, ignore_index=True)\n return None\n\n\n def run(self):\n \"\"\"Code à exécuter pendant l'exécution du thread.\"\"\"\n self.chunk[\"img\"].apply(lambda x: self.shift(x))\n return self.processed\n\n\ndef makeChunk(df, n):\n fractile = int(len(df)/n)\n chunkList = []\n a = 0\n b = 0\n for _ in range(n-1):\n b = a + fractile\n chunkList += [df.loc[a:b]]\n a = b+1\n chunkList += [df.loc[a:]]\n return chunkList\n\ndef run(x):\n return x.run()\n\ndef runWorkers(df, nb):\n pool = Pool(processes=nb)\n chunkList = makeChunk(df, nb)\n chunkList = [CalculateChunk(chunkList[i], i+1) for i in range(len(chunkList))]\n results = [pool.apply_async(run, args=(x,)) for x in chunkList]\n results = [p.get() for p in results]\n return results\n\ndef buildDataList():\n data = pd.read_csv(\"src/neural/trainingData/kaggle/A_Z Handwritten Data.csv\", sep='\\n')\n print(\"début de la construction ...\")\n start = time.time()\n print(\"--> début à : \", start)\n dfList = runWorkers(data, 50)\n end = time.time()\n print(\"--> fin à : \", end)\n hours, rem = divmod(end-start, 3600)\n minutes, seconds = divmod(rem, 60)\n print(\"--> dataset construit en : {:0>2}:{:0>2}:{:05.2f}\".format(int(hours),int(minutes),seconds))\n print(\"finished building dataset !!!\")\n return dfList\n\n# split a hauteur de 10%\ndef split_train_valid(dfList):\n df = pd.concat(dfList)\n df = df.reset_index(drop=True)\n train, test = train_test_split(df, test_size=0.1)\n df = None\n trainList = makeChunk(train.reset_index(drop=True), 30)\n return (trainList, test.reset_index(drop=True))\n\ndef saveDatasets(trainList, test):\n for i in range(len(trainList)):\n trainList[i].to_json(\"src/neural/trainingData/kaggleV3/train/kagglePart\" + str(i) + \".json\")\n \n test.to_json(\"src/neural/trainingData/kaggleV3/test/test.json\")\n\n print(\"dataset sauvegardé en 30 chunks\")\n return 0\n\ndef main():\n dfList = buildDataList()\n trainList, test = split_train_valid(dfList)\n saveDatasets(trainList, test)\n return 0\n\nif 
__name__ == \"__main__\":\n main()\n# ipython -m src.neural.dataBuilder.buildKaggleV3","repo_name":"the-mousaillon/handwritten_char_recognition","sub_path":"src/neural/dataBuilder/buildKaggleV3.py","file_name":"buildKaggleV3.py","file_ext":"py","file_size_in_byte":3606,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"26955250727","text":"'''\nDownload scenes from a given CSV list of scene IDs or product IDs.\n'''\nimport argparse\nimport csv\nimport os\nimport re\nimport urllib.request\nfrom login import login\n\ndef main(output_dir, csv_path, scene_ids, dataset, landsat):\n '''\n Download scenes from a given list of scene IDs or product IDs.\n INPUTS:\n output_dir: str : directory to download data in\n csv_path : str : path to CSV containing scene or product IDs\n scene_ids : bool : if True, CSV contains scene IDs; \n if False, CSV contains product IDs\n OPTIONAL INPUTS:\n dataset : str : name to identify dataset (if all ids are \n from the same dataset)\n landsat : bool : if True, IDs in CSV are all from Landsat datasets\n RETURNS:\n tar files downloaded in output_dir\n '''\n # login to EROS account\n api = login()\n\n print('Converting CSV to list')\n if scene_ids == True:\n # csv contains scene IDs\n scene_ids = csv_to_list(csv_path, 'scene')\n\n if dataset == False and landsat == True:\n # assign datasets to scenes\n datasets, scene_ids = assign_datasets(scene_ids)\n\n elif dataset == False and landsat == False:\n # datasets are unknown but NOT landsat\n raise Exception('Must supply dataset name; this option is currently not supported')\n\n elif dataset == True:\n # csv contains datasets\n datasets = csv_to_list(csv_path, 'dataset')\n\n \n elif scene_ids == False:\n # csv contains product IDs\n product_ids = csv_to_list(csv_path, 'product')\n\n if dataset == False and landsat == True:\n # assign datasets and scenes to products\n datasets, scene_ids = assign_datasets_and_scenes(product_ids, api)\n\n if dataset == False and landsat == False:\n # datasets are unknown but NOT landsat\n raise Exception('Must supply dataset name; this option is currently not supported')\n\n elif dataset == True:\n # csv contains datasets\n datasets, scene_ids = assign_scenes(product_ids, api)\n\n # download data\n os.makedirs(output_dir, exist_ok=True)\n\n print(scene_ids)\n print(datasets)\n\n for i, entity_id in enumerate(scene_ids):\n print(f'Downloading scene {i+1} of {len(scene_ids)}')\n \n dataset = datasets[i]\n # create output filename\n filename = os.path.join(output_dir, entity_id)\n\n # get download code\n try:\n download_opts = api.download_options(dataset, entity_id)\n download_opts_list = download_opts[0]['downloadOptions']\n if len(download_opts_list) > 1:\n download_code = 'STANDARD'\n else:\n download_code = download_opts_list[0]['downloadCode']\n\n except Exception:\n print('No download code found for this scene.')\n continue\n\n # get download information\n response = api.download(dataset, download_code, entity_id)\n\n if response == []:\n raise Exception('No dataset matches the inputs provided')\n continue\n \n # download dataset\n url = response[0]['url']\n urllib.request.urlretrieve(url, filename)\n \n # logout of EROS account\n api.logout()\n\n\ndef assign_datasets(scene_ids):\n '''\n Assign datasets to scene IDs.\n '''\n datasets = []\n scene_ids_real = []\n for i, scene_id in enumerate(scene_ids):\n # find dataset\n dataset = landsat_dataset(scene_id, scene_id=True)\n if dataset == '':\n # CSV did not contain an ID\n 
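# (added) landsat_dataset() returns '' when the ID prefix (e.g. LC08, LE07, LT05, LM0x)\n            # matches no Landsat mission, so the row is skipped\n            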
continue\n datasets.append(dataset)\n scene_ids_real.append(scene_id)\n scene_ids = scene_ids_real\n return datasets, scene_ids_real\n\n\ndef assign_datasets_and_scenes(product_ids, api):\n '''\n Assign datasets and scene IDs to product IDs.\n '''\n datasets = []\n scene_ids = []\n for i, product_id in enumerate(product_ids):\n print(f'Finding scene ID and dataset {i+1} of {len(product_ids)}')\n # find dataset\n dataset = landsat_dataset(product_id, scene_id=False)\n if dataset == '':\n # CSV did not contain an ID\n continue\n datasets.append(dataset)\n\n # find scene ID\n try:\n scene_id = api.id_lookup(dataset, product_id,\n input_field='displayId')\n except Exception:\n print('Invalid product ID')\n # invalid product ID\n continue\n scene_ids.append(scene_id[0])\n return datasets, scene_ids\n\n\ndef assign_scenes(product_ids, api):\n '''\n Assign scenes to product IDs with known datasets.\n '''\n datasets = csv_to_list(csv_path, 'dataset')\n scene_ids = []\n for i, product_id in enumerate(product_ids):\n print(f'Finding scene ID {i+1} of {len(product_ids)}')\n dataset = datasets[i]\n try:\n scene_id = api.id_lookup(dataset, product_id,\n input_field='displayId')\n except Exception:\n print('Invalid product ID or dataset')\n # invalid product ID or dataset\n continue\n scene_ids.append(scene_id[0])\n return datasets, scene_ids\n\n\ndef csv_to_list(csv_path, header_str):\n '''\n Convert csv file column to list.\n INPUTS: \n csv_path : str : path to csv document\n header_str : str : string to search for in column headers\n RETURNS:\n col_list : list : list of data in specified column\n '''\n col_list = []\n\n with open(csv_path, newline='') as f:\n reader = csv.DictReader(f)\n\n for row in reader:\n for cols in row.items():\n header = cols[0]\n col_list_search = re.compile(f'{header_str}',\n flags=re.IGNORECASE)\n\n if col_list_search.search(header):\n col_list.append(cols[1])\n if col_list == []:\n raise Exception(f'No column called {header_str} in CSV')\n\n return col_list \n\n\ndef landsat_dataset(data_id, scene_id=True):\n '''\n INPUTS:\n data_id : str : either scene or product ID\n scene_id : bool : if True, the data_id is a scene ID;\n if False, data_id is a product ID\n RETURNS:\n dataset : str : corresponding Landsat dataset string\n '''\n \n if scene_id == True:\n info = data_id[0:3]\n if scene_id == False:\n info = data_id[0:4]\n\n MSS_choices = ['LM01', 'LM02', 'LM03', 'LM04', 'LM05',\n 'LM1', 'LM2', 'LM3', 'LM4', 'LM5']\n TM_choices = ['LT04', 'LT05', 'LT4', 'LT5']\n ETM_choices = ['LE07', 'LE7']\n L8_choices = ['LC08', 'LC8']\n\n if info in MSS_choices:\n dataset = 'LANDSAT_MSS_C1'\n elif info in TM_choices:\n dataset = 'LANDSAT_TM_C1'\n elif info in ETM_choices:\n dataset = 'LANDSAT_ETM_C1'\n elif info in L8_choices:\n dataset = 'LANDSAT_8_C1'\n else:\n return ''\n return dataset\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Download scenes from a given CSV list of scene IDs or product IDs (specify with --scene_ids or --product_ids).')\n\n parser.add_argument('output_dir',\n metavar='OUTPUT_DIR', type=str,\n help='directory to download data to')\n parser.add_argument('csv_path',\n metavar='CSV_PATH', type=str,\n help='path to CSV containing scene or product IDs')\n parser.add_argument('--dataset',\n dest='dataset',\n action='store_true',\n help='if flagged, CSV contains a column with dataset names')\n parser.add_argument('--landsat',\n dest='landsat',\n action='store_true',\n help='if flagged, IDs in CSV are all from Landsat datasets')\n\n 
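# (added) example invocations, assuming the CSV layouts this script expects:\n    #   python download_list.py ./scenes scene_ids.csv --scene_ids --landsat\n    #   python download_list.py ./scenes product_ids.csv --product_ids --dataset\n    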
id_type = parser.add_mutually_exclusive_group(required=True)\n id_type.add_argument('--scene_ids',\n dest='scene_ids',\n action='store_true',\n help='if flagged, CSV contains scene IDs')\n id_type.add_argument('--product_ids',\n dest='scene_ids',\n action='store_false',\n help='if flagged, CSV contains product IDs')\n\n args = parser.parse_args()\n\n main(**vars(args))\n","repo_name":"madhunt/DSWE_EaD","sub_path":"API/earth_explorer_api/applications/download_list.py","file_name":"download_list.py","file_ext":"py","file_size_in_byte":8330,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"31254172167","text":"\"\"\"\nSolved\n20220401\n16197.py\n두 동전\n\"\"\"\n\nimport sys\nfrom collections import defaultdict, deque\ninput = sys.stdin.readline\n\nn, m = map(int,input().split())\nli =[list(input().strip()) for _ in range(n)]\n\ncoins = []\nfor i, st in enumerate(li):\n while True:\n try:\n j = st.index('o')\n li[i][j] = '.'\n coins.append((i,j))\n except ValueError:\n break\n\nq = deque([(tuple(coins),0)])\nans = -1\nvisit = defaultdict(bool)\nvisit[q[0][0]] = True\nwhile q:\n ((y1,x1),(y2,x2)),t =q[0]\n q.popleft()\n if t == 10:\n ans = -1\n break\n flag = False\n for dy,dx in zip((-1,0,1,0),(0,1,0,-1)):\n ny1 = y1 + dy\n nx1 = x1 + dx\n ny2 = y2 + dy\n nx2 = x2 + dx\n \n if (ny2>=n or ny2<0 or nx2<0 or nx2>=m) and (ny1>=n or ny1<0 or nx1<0 or nx1>=m):\n continue\n if (ny2>=n or ny2<0 or nx2<0 or nx2>=m) or (ny1>=n or ny1<0 or nx1<0 or nx1>=m):\n flag = True\n break\n if li[ny2][nx2] == '#':\n ny2-=dy\n nx2-=dx\n if li[ny1][nx1] == '#':\n ny1-=dy\n nx1-=dx\n if visit[((ny2,nx2),(ny1,nx1))]or visit[((ny1,nx1),(ny2,nx2))]:\n continue\n \n visit[((ny2,nx2),(ny1,nx1))] = True\n q.append((((ny1,nx1),(ny2,nx2)),t+1))\n if flag:\n ans = t+1\n break\n\nprint(ans)\n","repo_name":"yunyshs01/algo","sub_path":"src/16917.py","file_name":"16917.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33376660021","text":"'''\n네이버 증권 뉴스 json 파일을 불러오고, 이를 csv 파일로 변환하여 출력하는 프로그램\n\n'''\n\nimport json\nimport csv\nimport requests\n\nnewsCountPerPage = 1000 #한 요청마다 불러올 뉴스 기사의 수\n#보통 하루에 20여 개 정도의 뉴스가 올라옴 => 한 요청마다 {newsCountPerPage}개씩 불러오므로, 한 요청당 약 100일 분량의 뉴스를 불러옴\n\n# DATETIME = \"20191231\" #{DATETIME}~오늘(프로그램을 실행하는 시점)까지의 뉴스 기사를 불러옴\n#ex> 2020년 AA월 BB일까지의 뉴스 기사를 불러옴 => DATETIME = \"2020AABB\" | 2020년 X월 Y일 => DATETIME = \"20200X0Y\"\n\nTOTAL_FETCH_COUNT = 35 #최대 요청 횟수\n#어떤 경우라도 네이버 서버에 TOTAL_FETCH_COUNT번 이상 요청을 보내지 않음\n\n#기록할 csv 파일\ncsvFileName = \"./newsList_raw.csv\"\n#읽어올 json 파일\njsonFileName = \"./out.json\"\n\ncurrentPage = 1 #현재 페이지\nurl = f\"https://m.stock.naver.com/api/json/news/newsListJson.nhn?category=mainnews&pageSize={newsCountPerPage}&page=\"\n# targetedExpectedDate = False\nlistJson = []\n\n# def compareDateWithoutTime(date0, date1):\n# #3: 매개변수 오류, 0: 일치, 2:date0이 date1보다 이름, 1: date0이 date1보다 늦음\n# if type(date0) is not str or type(date1) is not str: raise TypeError #매개변수의 자료형이 string이 아닌 경우\n# if len(date0) != len(date1) != 8: return 3 #두 매개변수의 길이가 일치하지 않는 경우\n\n# #첫 번째 루프 => 월 비교 | 두 번째 루프 => 일 비교\n# for i in range(0, 3, 2):\n# diff = int(date1[4 + i : 6 + i]) - int(date0[4 + i : 6 + i])\n\n# if diff > 0: return 2 #date0 다음 date1\n# elif diff < 0: return 1 #date1 다음 date0\n# else: continue #같은 월인 경우\n \n# return 0 #날짜가 일치하는 경우\n\ncounter = 0\nwhile counter < TOTAL_FETCH_COUNT:\n #기사 불러오기\n print(f\"네이버로부터 {currentPage} 번째 JSON 
파일을 불러옵니다...\")\n try: out = requests.get(url + str(currentPage)).json()[\"result\"][\"newsList\"] #json 구조를 가진 dictionary\n except: raise Exception\n counter += 1\n print(out[newsCountPerPage - 1][\"dt\"][:8], \"일 뉴스까지 불러왔습니다.\")\n\n for k in range(len(out)): listJson.append(json.dumps(out[k], ensure_ascii=False)) #불러온 뉴스는 리스트에 추가\n \n currentPage += 1 #다음 페이지로 이동\n\njson_str = '{\"newsList\":[{' + \",\".join(listJson).replace('[', '{').replace(']', '}')[1:-1] + \"}]}\"\n\nprint(\"파일에 저장하고 있습니다...\")\nwith open(\"./out.json\", 'w', encoding=\"UTF8\") as jsonFile: jsonFile.write(json_str)\n\n#csv 파일 불러오기\ntry:\n print(\"csv 파일 만드는 중...\", csvFileName)\n csvFile = open(csvFileName, 'w', encoding=\"UTF8\", newline=\"\")\n csvObj = csv.writer(csvFile)\nexcept:\n print(\"csv 파일 열기 오류: \", csvFileName, \"을 쓸 수 없음\")\n exit(1)\n\n#json 파일 불러오기\ntry:\n print(\"json 파일 불러오는 중...\", jsonFileName)\n with open(jsonFileName, 'r', encoding=\"UTF8\") as jsonFile:\n jsonObj = json.load(jsonFile)[\"newsList\"]\nexcept:\n print(\"파일 읽기 오류: \", jsonFileName, \"이/가 존재하지 않음\")\n csvFile.close()\n exit(1)\n\n#json 파일에 있는 뉴스 기사의 수 저장\njsonFileLength = len(jsonObj)\n\n#뉴스 기사의 각 인덱스 리스트\njsonDicKeys = [\"tit\", \"subcontent\", \"dt\"]\n\n#제목 열 기록\ncsvObj.writerow([\"제목\", \"부제\", \"날짜\", \"시각\"])\n\nfor i in range(jsonFileLength):\n #csv 파일의 각 열이 될 리스트\n listToWrite = []\n\n for key in jsonDicKeys:\n if key == \"dt\": #dt => 날짜, 시각으로 분리하여 기록\n listToWrite.append(str(jsonObj[i][key][:8]))\n listToWrite.append(str(jsonObj[i][key][8:]))\n else:\n listToWrite.append(str(jsonObj[i][key]))\n\n csvObj.writerow(listToWrite) #csv파일에 리스트 기록\n\nprint(\"작업 완료: \", csvFileName, \"에 저장됨\")\ncsvFile.close()\n","repo_name":"GmelaN/csv_modifier","sub_path":"getRawDataFromNaver.py","file_name":"getRawDataFromNaver.py","file_ext":"py","file_size_in_byte":4067,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3440129002","text":"import csv\nimport os\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import ticker\n\nimport cutter\n\n\nclass GraphView:\n def __init__(self):\n self.optimal_vertices = []\n self._fig, self.ax = plt.subplots()\n self.edges = dict()\n self.landmark_order = dict()\n self.vertices = []\n self.colors = ['#00FF00', '#7FFF00', '#FFFF00', '#FF7F00', '#FF0000']\n self.dividers = []\n\n def draw(self):\n self.draw_edges()\n self.draw_vertices()\n self.draw_startend()\n if len(self.optimal_vertices) != 0:\n self.draw_optimal_vertices()\n\n def draw_vertices(self):\n pointsx = [vertex[0] for vertex in self.vertices]\n pointsy = [vertex[1] for vertex in self.vertices]\n self.ax.scatter(pointsx, pointsy, s=20, c=\"gray\", zorder=2)\n\n def draw_edges(self):\n for key in self.edges:\n keysplit = [float(i) for i in key.split()]\n pointsx = [keysplit[0], keysplit[2]]\n pointsy = [keysplit[1], keysplit[3]]\n plt.plot(pointsx, pointsy, color=self.get_edge_color(self.edges[key]), zorder=1)\n\n def draw_startend(self, distance=3): # distance means how far will the start and end be\n speeds = [v[1] for v in self.vertices]\n y = (min(speeds) + max(speeds)) / 2\n self.draw_edge_vertex(-distance, y, 0)\n self.draw_edge_vertex(len(self.landmark_order) + distance, y, len(self.landmark_order) - 1)\n\n def draw_edge_vertex(self, x, y, landmarkx): # landmarkx means the landmark to which the edge vertex will connect\n self.ax.scatter(x, y, s=40, c=\"blue\", zorder=2)\n y_to_connect = [v[1] for v in self.vertices if v[0] == landmarkx]\n for connecty in y_to_connect:\n 
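# (added) connect the virtual start/end vertex to every speed node of its landmark column, so a\n            # path can enter or leave the graph at any speed\n            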
plt.plot([x, landmarkx], [y, connecty], color=\"gray\", zorder=1)\n\n def draw_optimal_vertices(self):\n pointsx = [vertex[0] for vertex in self.optimal_vertices]\n pointsy = [vertex[1] for vertex in self.optimal_vertices]\n self.ax.scatter(pointsx, pointsy, s=40, c=\"blue\", zorder=2)\n\n def load_models(self, landmark_path, dir_path, optimal_path=None):\n self.load_landmarks(landmark_path)\n self.load_dir(dir_path)\n self.calc_edges()\n self.calc_dividers()\n if optimal_path:\n self.load_optimal(optimal_path)\n\n def load_dir(self, dir_path):\n for filename in os.listdir(dir_path):\n self.load_file(os.path.join(dir_path, filename))\n plt.gca().invert_yaxis()\n\n def load_file(self, file_path):\n matrix = load_model_matrix(file_path)\n self.load_file_edges(matrix)\n self.load_file_vertices(matrix)\n\n def load_optimal(self, file_path):\n with open(file_path, 'r') as f:\n csv_reader = csv.reader(f)\n for row in csv_reader:\n landmark = self.landmark_order[key_from_gps(float(row[0]), float(row[1]))]\n self.optimal_vertices.append((landmark, int(row[2])))\n\n def load_file_edges(self, matrix):\n for i in range(len(matrix) - 1):\n row1 = matrix[i]\n row2 = matrix[i + 1]\n key = self.edge_key(row1[0], row1[1], row1[2], row2[0], row2[1], row2[2])\n if key in self.edges:\n self.edges[key][0] += row1[3]\n self.edges[key][1] += 1\n else:\n self.edges[key] = [row1[3], 1]\n\n def load_file_vertices(self, matrix):\n self.vertices.extend([(self.landmark_order[key_from_gps(row[0], row[1])], row[2]) for row in matrix])\n\n def load_landmarks(self, input_path):\n with open(input_path, 'r') as f:\n csv_reader = csv.reader(f)\n i = 0\n for row in csv_reader:\n self.landmark_order[key_from_gps(row[0], row[1])] = i\n i += 1\n\n def edge_key(self, lat1, lon1, speed1, lat2, lon2, speed2):\n return f'{self.landmark_order[key_from_gps(lat1, lon1)]} {speed1} ' \\\n f'{self.landmark_order[key_from_gps(lat2, lon2)]} {speed2} '\n\n def calc_edges(self):\n for key in self.edges:\n self.edges[key] = self.edges[key][0] / self.edges[key][1]\n\n def calc_dividers(self):\n limit = max([self.edges[key] for key in self.edges])\n step = limit / len(self.colors)\n self.dividers = [step * i for i in range(1, len(self.colors))]\n\n def get_edge_color(self, value):\n for i in range(len(self.dividers)):\n if value < self.dividers[i]:\n return self.colors[i]\n return self.colors[-1]\n\n def show(self):\n plt.gca().invert_yaxis()\n self.ax.set_ylabel('speed')\n self.ax.spines['top'].set_visible(False)\n self.ax.spines['right'].set_visible(False)\n self.ax.yaxis.set_major_locator(ticker.MultipleLocator(5))\n plt.tick_params(\n axis='x', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n bottom=False, # ticks along the bottom edge are off\n top=False, # ticks along the top edge are off\n labelbottom=False) # labels along the bottom edge are off\n plt.tight_layout()\n plt.show()\n\n\ndef key_from_gps(lat, lon):\n return f'{lat},{lon}'\n\n\ndef load_model_matrix(file_path):\n temp_matrix = []\n with open(file_path, 'r') as f:\n csv_reader = csv.reader(f)\n for row in csv_reader:\n temp_matrix.append([float(row[0]), float(row[1]), int(row[2]), float(row[3])])\n return temp_matrix\n\n\ndef cut():\n cutter.cut_dir(r\"C:\\Users\\Lior\\Dropbox\\EE-Drive\\Database\\Eliav To Home 2\\Model Files\",\n r\"C:\\Users\\Lior\\Dropbox\\EE-Drive\\Database\\Cut for FuelPlot\",\n (31.860132199862605, 34.712515094777174), (31.84180734177865, 34.710048755663365))\n\n\nif __name__ == '__main__':\n graph = 
GraphView()\n graph.load_models(r\"C:\\Users\\Lior\\Dropbox\\EE-Drive\\Database\\Cut for FuelPlot\\eliav 1 2019-11-14-15-19-35.csv\",\n r\"C:\\Users\\Lior\\Dropbox\\EE-Drive\\Database\\Cut for FuelPlot\")\n graph.draw()\n graph.show()\n","repo_name":"YuvalYY/EcoDrivePythonLast1","sub_path":"figure3.py","file_name":"figure3.py","file_ext":"py","file_size_in_byte":6103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4428394556","text":"import time\n\nimport psutil\nimport logging\nfrom threading import Lock\nimport threading\nimport configuration\n\n\n\n# NOTE: for a realistic emulation the sensor value __secsleft runs \"asynchronously\", independent of the scheduler.\n# Another possibility is to trigger the update of the readings in the scheduler by calling do_batt().\n##########################################################################################################\nPC_UTIL_PERIOD_SEC = None\nMAX_INT = configuration.MAX_INT\nMIN_INT = configuration.MIN_INT\nMAX_VAL = configuration.MAX_VAL\nMIN_VAL = configuration.MIN_VAL\n\n\n\nclass pc_sensor():\n CLOCK_PERIOD_SEC = None\n __event = None\n __count = 0\n __secsleft = 0\n __lock = Lock()\n __count_sync = 0\n __secsleft_sync = 0\n __lock_sync = Lock()\n __cpu_percent = 0\n __cpu_percent_sync = 0\n __lock_cpu = Lock()\n __lock_cpu_sync = Lock()\n\n def __init__(self, event, CLOCK_PERIOD_SEC):\n logging.info('init pc_sensor')\n self.__event = event\n self.CLOCK_PERIOD_SEC = CLOCK_PERIOD_SEC\n self.updateGuiDefs()\n thread_name = \"pc_sensor_thread\"\n pc_info_thread = threading.Thread(name=thread_name, target=self.__thread_pc_sensor, args=(thread_name,))\n pc_info_thread.start()\n\n def updateGuiDefs(self):\n global PC_UTIL_PERIOD_SEC\n PC_UTIL_PERIOD_SEC = self.CLOCK_PERIOD_SEC[0] * configuration.PC_UTIL_PER_IN_CLK_PER\n logging.info(\"PC_UTIL_PERIOD_SEC = \" + str(PC_UTIL_PERIOD_SEC))\n\n def __thread_pc_sensor(self, name):\n logging.info(\"Thread %s: starting\", name)\n # thread loop\n while self.__event.evt_close_app.is_set() == False:\n # battery\n b = psutil.sensors_battery()\n if int(b.secsleft) > 0:\n self.__lock.acquire()\n self.__secsleft = int(b.secsleft)\n self.__lock.release()\n else:\n self.__count = self.__count + 1\n self.__lock.acquire()\n self.__secsleft = self.__count\n self.__lock.release()\n # CPU percent\n self.__lock_cpu.acquire()\n self.__cpu_percent = psutil.cpu_percent()\n self.__lock_cpu.release()\n # this runs close to scheduler clock but still asynchronous to it..\n # BUG: 10ms delay\n # self.__event.evt_wake_up.wait(PC_UTIL_PERIOD_SEC)\n time.sleep(PC_UTIL_PERIOD_SEC)\n logging.info(\"Thread %s: finished!\", name)\n\n def do_pc_info(self):\n # battery\n b = psutil.sensors_battery()\n if int(b.secsleft) > 0:\n self.__lock_sync.acquire()\n self.__secsleft_sync = int(b.secsleft)\n self.__lock_sync.release()\n else:\n self.__count_sync = self.__count_sync + 1\n self.__lock_sync.acquire()\n self.__secsleft_sync = self.__count_sync\n self.__lock_sync.release()\n # CPU percent\n self.__lock_cpu_sync.acquire()\n self.__cpu_percent_sync = psutil.cpu_percent()\n self.__lock_cpu_sync.release()\n\n def get_secsleft(self):\n self.__lock.acquire()\n __secsleft = self.__secsleft\n self.__lock.release()\n return __secsleft\n\n def get_secsleft_sync(self):\n self.__lock_sync.acquire()\n __secsleft_sync = self.__secsleft_sync\n self.__lock_sync.release()\n return __secsleft_sync\n\n def get_cpu_percent(self):\n self.__lock_cpu.acquire()\n __cpu_percent = 
self.__cpu_percent\n self.__lock_cpu.release()\n return __cpu_percent\n\n def get_cpu_percent_sync(self):\n self.__lock_cpu_sync.acquire()\n __cpu_percent_sync = self.__cpu_percent_sync\n self.__lock_cpu_sync.release()\n return __cpu_percent_sync\n\n def get_cpu_percent_int(self):\n # transform value according to this formula\n # relating the range of the real measurement (e.g. physical temperature) with the range of the\n # integer variable we need to use in the FPGA\n # y = y1 + (x - x1)*((y2 - y1)/(x2 - x1))\n self.__lock_cpu.acquire()\n ret_val = int(MIN_INT + (self.__cpu_percent - MIN_VAL) * ((MAX_INT - MIN_INT) / (MAX_VAL - MIN_VAL)))\n self.__lock_cpu.release()\n return ret_val\n\n def get_cpu_percent_sync_int(self):\n # transform value according to this formula\n # relating the range of the real measurement (e.g. physical temperature) with the range of the\n # integer variable we need to use in the FPGA\n # y = y1 + (x - x1)*((y2 - y1)/(x2 - x1))\n self.__lock_cpu_sync.acquire()\n ret_val = int(MIN_INT + (self.__cpu_percent_sync - MIN_VAL) * ((MAX_INT - MIN_INT) / (MAX_VAL - MIN_VAL)))\n self.__lock_cpu_sync.release()\n return ret_val\n\n\n","repo_name":"ClarkFieseln/FPGA_HW_SIM_FWK","sub_path":"python/hw_sim_fwk/pc_sensor.py","file_name":"pc_sensor.py","file_ext":"py","file_size_in_byte":4830,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"37"} +{"seq_id":"42231341251","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def deleteDuplicates(self, head: Optional[ListNode]) -> Optional[ListNode]:\n if not head or not head.next:\n return head\n dummyhead=dummy = ListNode(0,None)\n prev = head\n succ = head.next\n \n if prev.val != succ.val:\n dummy.next = prev\n dummy = dummy.next\n \n \n \n while succ:\n if succ.val == prev.val:\n succ = succ.next\n else:\n prev = succ\n succ = succ.next\n if succ and prev.val != succ.val:\n dummy.next = prev\n dummy = dummy.next\n if prev.next ==None and succ==None:\n dummy.next = prev\n else:\n dummy.next = None\n return dummyhead.next\n \n \n \n ","repo_name":"KebiraIdehmad/Leetcode-Solved-Questions","sub_path":"0082-remove-duplicates-from-sorted-list-ii/0082-remove-duplicates-from-sorted-list-ii.py","file_name":"0082-remove-duplicates-from-sorted-list-ii.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"75029651308","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import r2_score\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.compose import make_column_transformer\nfrom sklearn.pipeline import make_pipeline\nimport pickle\n\ncar_df = pd.read_csv('quikr_car.csv')\n\n#QUALITY\n\n# year has many non-year values\n# year object to int\n# price has ask for price\n# kms_driven has kms with integers\n# kms_driven object to int\n# kms_driven has nan values\n# fuel_type has nan values\n# keep first 3 words of name of car\n\nbackup_df = car_df.copy()\n\ncar_df = car_df.dropna()\n\ncar_df = car_df[car_df['year'].str.isnumeric()]\ncar_df['year'] = car_df['year'].astype(int)\n\ncar_df = car_df[car_df['Price']!=\"Ask For Price\"]\ncar_df['Price'] = car_df['Price'].str.replace(',','').astype(int)\n\ncar_df['kms_driven'] = 
car_df['kms_driven'].str.replace(',','')\ncar_df['kms_driven'] = car_df['kms_driven'].str.replace('kms','').astype(int)\n\ncar_df['fuel_type'] = car_df['fuel_type'].fillna(True)\n\ncar_df['name'] = car_df['name'].str.split(' ').str.slice(0,3).str.join(' ')\n\ncar_df = car_df[car_df['Price']<6e6].reset_index(drop=True)\n\ncar_df.to_csv(\"Cleaned_Car.csv\")\n\nx = car_df.drop(columns='Price')\ny = car_df['Price']\n\nx_train, x_test, y_train, y_test= train_test_split(x, y, test_size=0.2,random_state=661)\n\nohe = OneHotEncoder()\nohe.fit(x[['name','company','fuel_type',]])\n\ncolumn_trans = make_column_transformer((OneHotEncoder(categories=ohe.categories_),['name','company','fuel_type']), remainder='passthrough')\nlr = LinearRegression()\npipe = make_pipeline(column_trans,lr)\n\npipe.fit(x_train,y_train)\n\ny_pred = pipe.predict(x_test)\n#error = r2_score(y_test,y_pred)\n#print('R-squared score:', error)\n\npickle.dump(pipe,open('LinearRegressionModel.pkl','wb'))\npipe.predict(pd.DataFrame(columns=['name','company','year','kms_driven','fuel_type'],data=np.array(['Maruti Suzuki Swift','Maruti',2019,100,'Petrol']).reshape(1,5)))\n","repo_name":"anandjohnbaby/Car-Price-Prediction-System","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3731482736","text":"# -*- coding: utf-8 -*-\nimport datetime\nimport time\nimport threading\nimport random\n\n\nclass Pump():\n\tname = None\n\tpriority = 0\n\tperiod = 0\n\texecTime = 0\n\tlast_execTime = 0\n\tlast_deadline = 0\n\treset = 0\n\tdef __init__(self, name, period, execTime, output, last_exec, target):\n\t\tself.name = name;\n\t\tself.period = period;\n\t\tself.execTime = execTime;\n\t\tself.output = output;\n\t\tself.last_execTime = last_exec;\n\t\tself.target = target;\n\t\tself.reset = execTime;\n\t\t\n\tdef run(self):\n\t\tself.last_execTime = datetime.datetime.now()\n\t\tprint(self.name + \" : Starting to pump (\" + self.last_execTime.strftime(\"%H:%M:%S\") + \") : execution time = \" + str(self.execTime))\n\n\t\twhile(self.execTime != 0):\n\t\n\t\t\tself.execTime -= 1\n\t\n\t\t\ttime.sleep(1)\n\t\t\t\n\t\t\tif (self.execTime <= 0):\n\t\t\t\tself.execTime = self.reset\n\t\t\t\tif(self.target.storage < self.target.capacity):\n\t\t\t\t\tself.target.storage += self.output;\n\t\t\t\t\t# clamp any overflow at the tank's capacity\n\t\t\t\t\tif(self.target.storage > self.target.capacity):\n\t\t\t\t\t\tself.target.storage = self.target.capacity;\n\t\t\t\t\tprint(self.name + \" : Outputting to tank normally (\" + datetime.datetime.now().strftime(\"%H:%M:%S\") + \")\")\n\t\t\t\t\treturn\n\t\t\t\tif(self.target.storage >= self.target.capacity):\n\t\t\t\t\tprint(self.name + \" : Tank is full, wasting oil (\" + datetime.datetime.now().strftime(\"%H:%M:%S\") + \")\")\n\t\t\t\t\treturn\n\t\t\nclass Tank():\n\t\n\tstorage = 0\n\t\n\tdef __init__(self, capacity):\n\t\tself.capacity = capacity;\n\t\t\nclass Machine():\n\tname = None\n\tpriority = 0\n\tperiod = 0\n\texecTime = 0\n\tlast_execTime = 0\n\treset = 0\n\tlast_deadline = 0\n\tmachineType = None\n\tdef __init__(self, name, period, execTime, output, inpt, last_exec, tank, mType, target):\n\t\tself.name = name;\n\t\tself.period = period;\n\t\tself.execTime = execTime;\n\t\tself.output = output;\n\t\tself.inpt = inpt;\n\t\tself.last_execTime = last_exec;\n\t\tself.target = target;\n\t\tself.machineType = mType;\n\t\tself.tank = tank;\n\t\tself.reset = execTime;\n\t\t\n\tdef run(self):\n\t\tself.last_execTime = 
datetime.datetime.now()\n\t\t\n\t\tif(self.tank.storage < self.inpt):\n\t\t\treturn\n\t\telse:\n\t\t\tprint(self.name + \" : Starting to process (\" + self.last_execTime.strftime(\"%H:%M:%S\") + \") : execution time = \" + str(self.execTime))\n\t\t\tself.tank.storage -= self.inpt;\n\t\t\twhile(self.execTime != 0):\n\t\t\t\tself.execTime -= 1\n\t\t\n\t\t\t\ttime.sleep(1)\n\t\t\n\t\t\t\tif (self.execTime <= 0):\n\t\t\t\t\tself.execTime = self.reset\n\t\t\t\t\tself.target.storage += self.output;\n\t\t\t\t\tprint(self.name + \" : Outputting to pile normally (\" + datetime.datetime.now().strftime(\"%H:%M:%S\") + \")\")\n\t\t\t\t\treturn\n\t\t\n\t\t\nif __name__ == '__main__':\n\t\n\t#initialise\n\tstop = False\n\ttestTime = datetime.timedelta(minutes=2)\n\tstartTime = datetime.datetime.now()\n\tprint(\"TIME\")\n\tprint(testTime+startTime)\n\ttank = Tank(capacity = 50)\n\ttank.storage = 0\n\twheelStock = Tank(capacity = 999)\n\tmotorStock = Tank(capacity = 999)\n\tpump1Prio = -1\n\tpump2Prio = -1\n\tmotorPrio = -1\n\twheelPrio = -1\n\tlast_exec = datetime.datetime.now()\n\tprint('Test')\n\tpump1 = Pump(name= \"Pump 1\", period = 5, execTime = 2, output = 10, last_exec=0, target=tank);\n\tpump2 = Pump(name= \"Pump 2\", period=15, execTime=3, output=20, last_exec=0, target=tank);\n\twheelMachine = Machine(name=\"Wheel Machine\", period=5,execTime=3,output=1,inpt=5,last_exec=0,tank=tank,mType='Wheel',target=wheelStock)\n\tmotorMachine = Machine(name=\"Motor Machine\", period=5,execTime=5,output=1,inpt=25,last_exec=0,tank=tank,mType='Motor',target=motorStock)\n\ttaskList = [pump1, pump2, wheelMachine, motorMachine]\n\t\n\t\n\twhile(stop == False):\n\t\tprint(\"Tank: \" + str(tank.storage) + \"/\" + str(tank.capacity))\n\t\tif(datetime.datetime.now() >= (startTime + testTime)):\n\t\t\tstop = True\n\t\tif(tank.storage == 50):\n\t\t\tprint(\"IF test\")\n\t\t\tpump1Prio = -1\n\t\t\tpump2Prio = -1\n\t\t\tif(motorStock.storage < wheelStock.storage/4):\n\t\t\t\tmotorPrio = 3\n\t\t\t\twheelPrio = 2\n\t\t\telse:\n\t\t\t\twheelPrio = 3\n\t\t\t\tmotorPrio = -1\n\t\tif(tank.storage == 0):\n\t\t\tpump2Prio = 3\n\t\t\tpump1Prio = -1\n\t\t\tmotorPrio = -1\n\t\t\twheelPrio = 1\n\t\tif(0 < tank.storage and tank.storage < 25):\n\t\t\tpump2Prio = 3\n\t\t\tpump1Prio = 2\n\t\t\twheelPrio = -1\n\t\t\tmotorPrio = -1\n\t\tif(25 < tank.storage and tank.storage < 50):\n\t\t\tif(motorStock.storage < wheelStock.storage/4):\n\t\t\t\tpump1Prio = 3\n\t\t\t\tmotorPrio = 2\n\t\t\t\twheelPrio = 1\n\t\t\tif(motorStock.storage >= wheelStock.storage/4):\n\t\t\t\tpump1Prio = 1\n\t\t\t\twheelPrio = 3\n\t\t\t\tmotorPrio = -1\n\t\t\t\t\n\t\tpump1.priority = pump1Prio\n\t\tpump2.priority = pump2Prio\n\t\twheelMachine.priority = wheelPrio\n\t\tmotorMachine.priority = motorPrio\n\t\t\n\t\texecList = []\n\t\tfor task in taskList:\n\t\t\tif(task.priority == 3):\n\t\t\t\texecList.append(task)\n\t\tfor task in taskList:\n\t\t\tif(task.priority == 2):\n\t\t\t\texecList.append(task)\t\n\t\tfor task in taskList:\n\t\t\tif(task.priority == 1):\n\t\t\t\texecList.append(task)\t\t\n\t\tfor task in taskList:\n\t\t\tif(task.priority == 0):\n\t\t\t\texecList.append(task)\t\t\n\t\tfor toRun in execList:\n\t\t\ttoRun.run()\n\t\t\n\tprint(\"Wheel Stock: \" + str(wheelStock.storage) + \". 
Motor Stock: \" + str(motorStock.storage))","repo_name":"Zankha/exam_harly","sub_path":"Exam/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":4839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20997011082","text":"from django.urls import path\nfrom django.views.generic import RedirectView\n\nfrom ppp.views import AddPPPRegistrationProtocolView, SavedPPPRegistrationProtocolListView, \\\n EditPPPRegistrationProtocolView, PendingPPPRegistrationProtocolListView, PPPRegistrationProtocolListView, \\\n PdfVersionView, DownloadListInExcelView\n\nurlpatterns = [\n path('registration-protocol/add/', AddPPPRegistrationProtocolView.as_view(), name='add_ppp_registration_protocol'),\n path('registration-protocol//edit/', EditPPPRegistrationProtocolView.as_view(), name='edit_ppp_registration_protocol'),\n path('registration-protocols/saved-list/', SavedPPPRegistrationProtocolListView.as_view(), name='saved_ppp_registration_protocols_list'),\n path('registration-protocols/pending-list/', PendingPPPRegistrationProtocolListView.as_view(), name='pending_ppp_registration_protocols_list'),\n path('registration-protocols/list/', PPPRegistrationProtocolListView.as_view(), name='ppp_registration_protocols_list'),\n path('registration-protocols//pdf/', PdfVersionView.as_view(), name='pdf_of_ppp_registration_protocol'),\n path('registration-protocols/download-in-excel/', DownloadListInExcelView.as_view(), name='ppp_registration_protocol_download_in_excel'),\n]\n","repo_name":"Abdulrakhmon/efito_for_profile","sub_path":"apps/ppp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39025025248","text":"from flask import jsonify\nfrom flask_restful import Resource, reqparse\nfrom app.models import ExchangeData\nfrom .validators import amount_validator, currency_validator\nfrom .helpers import calculate_price, get_price_info, NotSupportedCurrecyException\nfrom .schemas import ExchangeDataSchema\nfrom requests.exceptions import ConnectionError, ConnectTimeout\n\n\nclass ExchangeDataResource(Resource):\n def post(self):\n request_parser = reqparse.RequestParser()\n request_parser.add_argument('amount', type=amount_validator, location='form', required=True)\n request_parser.add_argument('currency', type=currency_validator, location='form', required=True)\n\n args = request_parser.parse_args()\n currency = args.get('currency')\n amount = args.get('amount')\n\n try:\n rates = get_price_info(currency)\n except (ConnectionError, ConnectTimeout) as e:\n response = jsonify({'error': str(e)})\n response.status_code = 400\n return response\n\n try:\n rate = rates[currency]\n except NotSupportedCurrecyException:\n response = jsonify({'error': \"Exchange rate not found\"})\n response.status_code = 400\n return response\n\n dec_amount, dec_rate, dec_price = calculate_price(amount, rate)\n\n exchange_data = ExchangeData(currency=currency, amount=dec_amount, price=dec_price, rate=dec_rate)\n exchange_data.save()\n\n sch = ExchangeDataSchema()\n response = jsonify(sch.dump(exchange_data))\n response.status_code = 201\n\n return response\n\n\nclass LastOperationsResouce(Resource):\n def get(self):\n parser = reqparse.RequestParser()\n parser.add_argument('num_records', type=amount_validator, location='args', required=False)\n parser.add_argument('currency', type=currency_validator, location='args', required=False)\n\n args 
= parser.parse_args()\n currency = args.get('currency', \"\")\n num_records = args.get('num_records', None)\n num_records = num_records\n if not num_records and not currency:\n num_records = 1\n\n queryset = ExchangeData.query\n\n if currency:\n queryset = queryset.filter_by(currency=currency)\n\n queryset = queryset.order_by(ExchangeData.id.desc())\n\n if num_records:\n queryset = queryset.limit(num_records)\n\n sch = ExchangeDataSchema()\n response = jsonify([sch.dump(i) for i in queryset])\n response.status_code = 200\n\n return response\n","repo_name":"micahaza/flask-mysql-api-docker","sub_path":"app/api/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":2564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18987352530","text":"# Definition for binary tree with next pointer.\nclass TreeLinkNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n self.next = None\n\nclass Solution(object):\n def connect(self, root):\n \"\"\"\n :type root: TreeLinkNode\n :rtype: nothing\n \"\"\"\n if not root:\n return\n up = root\n down = up.left\n while down:\n pos = up\n while pos:\n pos.left.next = pos.right\n if pos.next:\n pos.right.next = pos.next.left\n pos = pos.next\n up = up.left\n down = down.left\n","repo_name":"gitttttt/lc","sub_path":"q116.py","file_name":"q116.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32124276716","text":"class InvalidColumnError(Exception):\n '''Error when trying to add a piece to the column thatis already filled.\n '''\n pass\n\n\nclass Board:\n '''Class that defines the connect four board, and holds methods for\n the board class.\n '''\n __RED = '\\033[91m'\n __YELLOW = '\\033[93m'\n __ENDC = '\\033[0m'\n\n def __init__(self):\n '''Inits with a board with the dimensions of 6 by 7 and fills the\n the board with the placeholder.\n '''\n row, col = 6, 7\n self.__placeholder = '.'\n self.__board = [\n [self.__placeholder for _ in range(col)] for _ in range(row)]\n self.__col_counter = [row - 1 for _ in range(col)]\n\n def __str__(self):\n '''Turns the board into a a formatted string that displays the board\n when it is printed.\n\n Returns:\n str -- The board as a string.\n '''\n board = ''\n for row in range(len(self.__board)):\n for col in range(len(self.__board[row])):\n if self.__board[row][col] == 'R':\n board += self.__RED + \\\n self.__board[row][col] + self.__ENDC + (' ' * 2)\n elif self.__board[row][col] == 'Y':\n board += self.__YELLOW + \\\n self.__board[row][col] + self.__ENDC + (' ' * 2)\n else:\n board += self.__board[row][col] + (' ' * 2)\n board += '\\n'\n for x in range(len(self.__col_counter)):\n board += str(x) + (' ' * 2)\n return board\n\n def is_filled(self):\n '''Returns whether the board is filled or not.\n\n Returns:\n Boolean -- True or False\n '''\n for row in range(len(self.__board)):\n for col in range(len(self.__board[row])):\n if self.__board[row][col] == self.__placeholder:\n return False\n return True\n\n def put_piece(self, player):\n '''Method that puts the corresponding color piece for the\n given column from the players input.\n\n Arguments:\n player {Player} -- The player that will put the piece.\n\n Raises:\n InvalidColumnError -- raise error when the column is filled.\n '''\n col = player.get_input()\n if self.__col_counter[col] < 0:\n raise InvalidColumnError\n else:\n 
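# drop the piece onto the lowest open row of this column, then move the column cursor up one row\n            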
self.__board[self.__col_counter[col]][col] = player.color\n            self.__col_counter[col] -= 1\n\n    def __horizontal(self, color):\n        '''Checks whether there is a connect four horizontally\n        for the given color.\n\n        Arguments:\n            color {Player.color} -- either 'R' or 'Y'\n\n        Returns:\n            Boolean -- either True or False\n        '''\n        for row in range(len(self.__board) - 1, -1, -1):\n            for col in range(0, len(self.__board[row]) - 3):\n                if self.__board[row][col] == color and \\\n                        self.__board[row][col + 1] == color and \\\n                        self.__board[row][col + 2] == color and \\\n                        self.__board[row][col + 3] == color:\n                    return True\n        return False\n\n    def __vertical(self, color):\n        '''Checks whether there is a connect four vertically\n        for the given color.\n\n        Arguments:\n            color {Player.color} -- either 'R' or 'Y'\n\n        Returns:\n            Boolean -- either True or False\n        '''\n        for col in range(len(self.__board[0])):\n            for row in range(len(self.__board) - 1, -1, -1):\n                if self.__board[row][col] == color and \\\n                        self.__board[row - 1][col] == color and \\\n                        self.__board[row - 2][col] == color and \\\n                        self.__board[row - 3][col] == color:\n                    return True\n        return False\n\n    def __up_diagonal(self, color):\n        '''Checks whether there is a connect four diagonally going up\n        for the given color.\n\n        Arguments:\n            color {Player.color} -- either 'R' or 'Y'\n\n        Returns:\n            Boolean -- either True or False\n        '''\n        for row in range(len(self.__board) - 1, 2, -1):\n            for col in range(0, len(self.__board[row]) - 3):\n                if self.__board[row][col] == color and \\\n                        self.__board[row - 1][col + 1] == color and \\\n                        self.__board[row - 2][col + 2] == color and \\\n                        self.__board[row - 3][col + 3] == color:\n                    return True\n        return False\n\n    def __down_diagonal(self, color):\n        '''Checks whether there is a connect four diagonally going down\n        for the given color.\n\n        Arguments:\n            color {Player.color} -- either 'R' or 'Y'\n\n        Returns:\n            Boolean -- either True or False\n        '''\n        for row in range(len(self.__board) - 1, 2, -1):\n            for col in range(3, len(self.__board[row]), 1):\n                if self.__board[row][col] == color and \\\n                        self.__board[row - 1][col - 1] == color and \\\n                        self.__board[row - 2][col - 2] == color and \\\n                        self.__board[row - 3][col - 3] == color:\n                    return True\n        return False\n\n    def is_connect_four(self, color):\n        '''Checks whether there is a connect four for the given color.\n\n        Arguments:\n            color {Player.color} -- either 'R' or 'Y'\n\n        Returns:\n            Boolean -- either True or False\n        '''\n        return self.__horizontal(color) or self.__vertical(color) or \\\n            self.__up_diagonal(color) or self.__down_diagonal(color)\n","repo_name":"mahimarib/connect_four","sub_path":"board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":5718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11971646760","text":"from document import Document\nfrom validate import validate_enrollment, parse_document\nfrom bucket import Bucket \nfrom bitcoinecdsa import sign, verify, pubkey, pubkey_to_address\nfrom order import Order\nimport os\nimport click\nfrom ui import shorten, get_choice\n\n\ndef mediator_prompt(rein, eligible_mediators):\n    mediators = unique(eligible_mediators, 'Mediator public key')\n    key = pubkey(rein.user.dkey)\n    i = 0\n    # iterate over a copy so removing our own entry does not skip the next mediator\n    for m in list(mediators):\n        if m[\"Mediator public key\"] == key:\n            mediators.remove(m)\n            continue\n        click.echo('%s - %s - Fee: %s - Public key: %s' % (str(i), m['User'], m['Mediator fee'], m['Mediator public key']))\n        i += 1\n    if len(mediators) == 0:\n        click.echo(\"None found.\")\n        return 
None\n choice = get_choice(mediators, 'mediator')\n if choice == 'q':\n return False\n return mediators[choice]\n\n# called in offer()\ndef bid_prompt(rein, bids):\n \"\"\"\n Prompts user to choose a bid on one of their jobs. This means they should be the job creator and\n not the worker or mediator.\n \"\"\"\n i = 0\n valid_bids = []\n key = pubkey(rein.user.dkey)\n for b in bids:\n if 'Description' not in b or b['Job creator public key'] != key:\n continue \n click.echo('%s - %s - %s - %s - %s BitCoin' % (str(i), b['Job name'], b[\"Worker\"],\n shorten(b['Description']), b['Bid amount (BTC)']))\n valid_bids.append(b)\n i += 1\n if len(valid_bids) == 0:\n click.echo('No bids available.')\n return None\n choice = get_choice(valid_bids, 'bid')\n if choice == 'q':\n click.echo('None chosen.')\n return False\n bid = valid_bids[choice]\n click.echo('You have chosen %s\\'s bid.\\n\\nFull description: %s\\nBid amount (BTC): %s\\n\\nPlease review carefully before accepting. (Ctrl-c to abort)' % \n (bid['Worker'], bid['Description'], bid['Bid amount (BTC)']))\n return bid\n\n\ndef job_prompt(rein, jobs):\n \"\"\"\n Prompt user for jobs they can bid on. Filters out jobs they created or are mediator for.\n \"\"\"\n key = pubkey(rein.user.dkey)\n valid_jobs = []\n for j in jobs:\n if j['Job creator public key'] != key and j['Mediator public key'] != key:\n valid_jobs.append(j)\n \n i = 0\n for j in valid_jobs:\n click.echo('%s - %s - %s - %s' % (str(i), j[\"Job creator\"],\n j['Job name'], shorten(j['Description'])))\n i += 1\n choice = get_choice(valid_jobs, 'job')\n if choice == 'q':\n return False\n job = valid_jobs[choice]\n click.echo('You have chosen a Job posted by %s.\\n\\nFull description: %s\\n\\nPlease pay attention '\n 'to each requirement and provide a time frame to complete the job. (Ctrl-c to abort)\\n' % \n (job['Job creator'], job['Description']))\n return job\n\n\ndef delivery_prompt(rein, choices, detail='Description'):\n choices = unique(choices, 'Job ID')\n i = 0\n for c in choices:\n if 'Bid amount (BTC)' not in c:\n continue\n if detail in c:\n click.echo('%s - %s - %s BTC - %s' % (str(i), c['Job name'], c['Bid amount (BTC)'], shorten(c[detail])))\n else:\n click.echo('%s - %s - %s BTC - %s' % (str(i), c['Job name'], c['Bid amount (BTC)'], shorten(c['Description'])))\n i += 1\n choice = get_choice(choices, 'job')\n if choice == 'q':\n return None\n chosen = choices[choice]\n click.echo('You have chosen to post deliverables. The following is from your winning bid.'\n '\\n\\nDescription: %s\\n\\nPlease review carefully before posting deliverables. '\n 'This will be public and reviewed by mediators if disputed. (Ctrl-c to abort)\\n' % \n (chosen['Description'],))\n return chosen\n\n\ndef accept_prompt(rein, choices, detail='Description'):\n i = 0\n click.echo(\"Offers and Deliveries\")\n click.echo(\"---------------------\")\n for c in choices:\n if 'Primary escrow redeem script' not in c:\n continue\n if detail in c:\n click.echo('%s: %s - %s - %s - %s' % (c['state'].title(), str(i),\n c['Job name'], c['Job ID'], shorten(c[detail])))\n else:\n click.echo('%s: %s - %s - %s - %s' % (c['state'].title(), str(i),\n c['Job name'], c['Job ID'], shorten(c['Description'])))\n i += 1\n choice = get_choice(choices, 'delivery or offer')\n if choice == 'q':\n return None\n chosen = choices[choice]\n if detail in chosen:\n contents = chosen[detail]\n else:\n contents = chosen['Description']\n click.echo('You have chosen to accept the following deliverables. 
\\n\\n%s: %s\\nAccepted Bid amount (BTC): %s\\n'\n 'Primary escrow redeem script: %s\\n'\n 'Worker address: %s\\n\\n'\n 'Mediator escrow redeem script: %s\\n'\n 'Mediator address: %s\\n'\n '\\nPlease review carefully before accepting. Once you upload your signed statement, the mediator should no '\n 'longer provide a refund. (Ctrl-c to abort)\\n' % \n (detail,\n contents, chosen['Bid amount (BTC)'],\n chosen['Primary escrow redeem script'],\n pubkey_to_address(chosen['Worker public key']),\n chosen['Mediator escrow redeem script'],\n pubkey_to_address(chosen['Mediator public key'])\n )\n )\n return chosen\n\n\ndef dispute_prompt(rein, choices, detail='Description'):\n i = 0\n for c in choices:\n if 'Primary escrow redeem script' not in c:\n continue\n if detail in c:\n click.echo('%s - %s - %s - %s' % (str(i), c['Job name'], c['Job ID'], shorten(c[detail])))\n else:\n click.echo('%s - %s - %s - %s' % (str(i), c['Job name'], c['Job ID'], shorten(c['Description'])))\n i += 1\n choice = get_choice(choices, 'job')\n if choice == 'q':\n return None\n chosen = choices[choice]\n if detail in chosen:\n contents = chosen[detail]\n else:\n contents = chosen['Description']\n click.echo('You have chosen to dispute the following deliverables. \\n\\n%s: %s\\n\\nPlease provide as much detail as possible. '\n 'For the primary payment, you should build and sign one that refunds you at %s. (Ctrl-c to abort)\\n' % \n (detail, contents, rein.user.daddr))\n return chosen\n\n\ndef resolve_prompt(rein, choices, detail='Dispute detail'):\n i = 0\n for c in choices:\n if 'Primary escrow redeem script' not in c:\n continue\n click.echo('%s - %s - %s - %s' % (str(i), c['Job name'], c['Job ID'], shorten(c[detail])))\n i += 1\n choice = get_choice(choices, 'dispute')\n if choice == 'q':\n return None\n chosen = choices[choice]\n click.echo('You have chosen to resolve this dispute. \\n\\n%s: %s\\n\\n'\n 'For the mediator payment, you should build and sign one that pays you at %s. (Ctrl-c to abort)\\n' %\n (detail, chosen[detail], rein.user.daddr))\n return chosen\n\n\ndef assemble_document(title, fields):\n \"\"\"\n Prompts to fill in any gaps in form values, then builds document with all fields.\n \"\"\"\n data = []\n for field in fields:\n entry = {}\n entry['label'] = field['label']\n if 'validator' in field.keys():\n valid = False\n while not valid:\n answer = click.prompt(field['label'])\n valid = field['validator'](answer)\n entry['value'] = answer\n elif 'value' in field.keys():\n entry['value'] = field['value']\n elif 'value_from' in field.keys():\n entry['value'] = field['value_from'][field['label']]\n elif 'not_null' in field.keys() and field['not_null']:\n entry['value'] = field['not_null'][field['label']]\n else:\n entry['value'] = click.prompt(field['label'])\n data.append(entry)\n document = \"Rein %s\\n\" % title\n for entry in data:\n document = document + entry['label'] + \": \" + entry['value'] + \"\\n\"\n return document[:-1]\n\n\ndef sign_and_store_document(rein, doc_type, document, signature_address=None, signature_key=None, store=True):\n \"\"\"\n Save document if no signature key provided. 
Otherwise sign document, then validate and store it.\n    \"\"\"\n    validated = False\n    if signature_key is None: # signing will happen outside app\n        f = open(doc_type + '.txt', 'w')\n        f.write(document)\n        f.close()\n        click.echo(\"\\n%s\\n\" % document)\n        done = False\n        while not done:\n            filename = click.prompt(\"File containing signed document\", type=str, default=doc_type + '.sig.txt')\n            if os.path.isfile(filename):\n                done = True\n        f = open(filename, 'r')\n        signed = f.read()\n        res = validate_enrollment(signed)\n        if res:\n            validated = True\n    else: # sign with stored delegate key and assemble the signed message here\n        signature = sign(signature_key, document)\n        validated = verify(signature_address, document, signature)\n        b = \"-----BEGIN BITCOIN SIGNED MESSAGE-----\"\n        c = \"-----BEGIN SIGNATURE-----\"\n        d = \"-----END BITCOIN SIGNED MESSAGE-----\"\n        signed = \"%s\\n%s\\n%s\\n%s\\n%s\\n%s\" % (b, document, c, signature_address, signature, d)\n\n    if validated:\n        click.echo('\\n' + signed + '\\n')\n        if store:\n            # insert signed document into documents table\n            d = Document(rein, doc_type, signed, sig_verified=True, testnet=rein.testnet)\n            rein.session.add(d)\n            rein.session.commit()\n            return d\n    return validated\n\ndef assemble_order(rein, document):\n    \"\"\"\n    Take one document and build the entire order based on it. The idea here is that one Job ID should\n    allow us to query each available server for each document type that is associated with it, then\n    filter out bogus shit by focusing on who's signed correct stuff. This kind of command can also\n    look for attempted changes in foundational info like participants' public keys and redeem scripts.\n    If this works well, we can reduce how much data is required at each stage. Finally, we should\n    be able to serialize a job from end to end so it can be easily reviewed by a mediator.\n    \"\"\"\n    parsed = parse_document(document.contents)\n    if 'Job ID' not in parsed:\n        return 0\n    job_id = parsed['Job ID']\n    urls = Bucket.get_urls(rein)\n    documents = []\n    if job_id:\n        for url in urls:\n            res = Document.get_documents_by_job_id(rein, url, job_id)\n            if res:\n                documents += res\n    order_id = Order.get_order_id(rein, job_id)\n    if not order_id:\n        o = Order(job_id, testnet=rein.testnet)\n        rein.session.add(o)\n        rein.session.commit()\n        order_id = Order.get_order_id(rein, job_id)\n\n    for document in documents:\n        doc_type = Document.get_document_type(document)\n        if not doc_type:\n            rein.log.info('doc_type not detected')\n            continue\n        doc_hash = Document.calc_hash(document)\n        d = rein.session.query(Document).filter(Document.doc_hash == doc_hash).first()\n        if d:\n            d.set_order_id(order_id)\n        else:\n            new_document = Document(rein, doc_type, document, order_id, 'external', source_key=None, sig_verified=True, testnet=rein.testnet)\n            rein.session.add(new_document)\n\n    rein.session.commit()\n\n    return len(documents)\n    # how do we test this? give it a document, it gets the job id, then does a query for all other docs \n    # with that job id. if it can figure out the doc type, it sets the order id on it. 
this allows\n # Order.get_documents() to provide all documents or to provide just the post or the bid.\n\ndef unique(the_array, key=None):\n \"\"\"\n Filter an array of dicts by key\n \"\"\"\n unique = []\n values = []\n for element in the_array:\n if key:\n if key in element and element[key] not in values:\n values.append(element[key])\n unique.append(element)\n else:\n if element not in unique:\n unique.append(element)\n return unique\n","repo_name":"Stemby/python-rein","sub_path":"rein/lib/market.py","file_name":"market.py","file_ext":"py","file_size_in_byte":12328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"18171895076","text":"import sqlite3\nfrom models import Employee, Location, Animal\n\nEMPLOYEES = [{\"id\": 1, \"name\": \"Jenna Solis\"}]\n\n\ndef get_all_employees():\n \"\"\"given code to fetch all employees from sql database\"\"\"\n # Open a connection to the database\n with sqlite3.connect(\"./kennel.sqlite3\") as conn:\n # Just use these. It's a Black Box.\n conn.row_factory = sqlite3.Row\n db_cursor = conn.cursor()\n\n # Write the SQL query to get the information you want\n db_cursor.execute(\n \"\"\"\n SELECT\n e.*,\n l.name location_name,\n l.address as location_address\n FROM employee e\n JOIN Location l ON l.id = e.location_id\n \"\"\"\n )\n\n # Initialize an empty list to hold all employee representations\n employees = []\n\n # Convert rows of data into a Python list\n dataset = db_cursor.fetchall()\n\n # Iterate list of data returned from database\n for row in dataset:\n # Create an employee instance from the current row.\n # Note that the database fields are specified in\n # exact order of the parameters defined in the\n # Employee class above.\n employee = Employee(\n row[\"id\"], row[\"name\"], row[\"address\"], row[\"location_id\"], None, None\n )\n location = Location(row[\"id\"], row[\"location_name\"], row[\"location_address\"])\n\n employee.location = location.__dict__\n\n employees.append(employee.__dict__)\n\n return employees\n\n\n# Function with a single parameter\ndef get_single_employee(id):\n \"\"\"given code to fetch single employee from sql database given id\"\"\"\n with sqlite3.connect(\"./kennel.sqlite3\") as conn:\n conn.row_factory = sqlite3.Row\n db_cursor = conn.cursor()\n\n # Use a ? 
parameter to inject a variable's value\n # into the SQL statement.\n db_cursor.execute(\n \"\"\"\n SELECT\n e.*,\n l.name location_name,\n l.address as location_address,\n\t\t\ta.name as animal,\n a.breed,\n a.status,\n a.location_id,\n a.customer_id customer_id,\n\t\t\ta.id as animal_table_id\n\t\tFROM employee e\n JOIN Location l ON l.id = e.location_id\n JOIN employee_animals ea ON ea.employee_id = e.id\n\t\tJOIN Animal a ON animal_table_id = ea.animal_id\n WHERE e.id = ?\n \"\"\",\n (id,),\n )\n employee = None\n employee_animals = []\n # Load the single result into memory\n dataset = db_cursor.fetchall()\n for data in dataset:\n if employee is None:\n employee = Employee(\n data[\"id\"], data[\"name\"], data[\"address\"], data[\"location_id\"], None, None\n )\n location = Location(\n data[\"id\"], data[\"location_name\"], data[\"location_address\"]\n )\n employee.location = location.__dict__\n if data[\"animal\"] is not None:\n animal = Animal(data['animal_table_id'], data['animal'], data['breed'], data['status'], data['location_id'], data['customer_id'])\n employee_animals.append(animal.__dict__)\n\n employee.animals = employee_animals\n print(employee)\n return employee.__dict__\n\n\ndef get_employees_by_location(location_id):\n \"\"\"retrieves employee data from sql db from given location_id\"\"\"\n with sqlite3.connect(\"./kennel.sqlite3\") as conn:\n conn.row_factory = sqlite3.Row\n db_cursor = conn.cursor()\n\n # Write the SQL query to get the information you want\n db_cursor.execute(\n \"\"\"\n select\n *\n from Employee e\n WHERE e.location_id = ?\n \"\"\",\n (location_id,),\n )\n\n employees = []\n dataset = db_cursor.fetchall()\n\n for row in dataset:\n employee = Employee(\n row[\"id\"], row[\"name\"], row[\"address\"], row[\"location_id\"], None, None\n )\n\n employees.append(employee.__dict__)\n return employees\n\n\ndef create_employee(employee):\n \"\"\"docstring for create employee. It posts employees\"\"\"\n # Get the id value of the last employee in the list\n max_id = EMPLOYEES[-1][\"id\"]\n\n # Add 1 to whatever that number is\n new_id = max_id + 1\n\n # Add an `id` property to the employee dictionary\n employee[\"id\"] = new_id\n\n # Add the employee dictionary to the list\n EMPLOYEES.append(employee)\n\n # Return the dictionary with `id` property added\n return employee\n\n\ndef delete_employee(id):\n \"\"\"deletes employee with matching employee Id\"\"\"\n # Initial -1 value for employee index, in case one isn't found\n employee_index = -1\n\n # Iterate the EMPLOYEES list, but use enumerate() so that you\n # can access the index value of each item\n for index, employee in enumerate(EMPLOYEES):\n if employee[\"id\"] == id:\n # Found the employee. 
Store the current index.\n employee_index = index\n\n # If the employee was found, use pop(int) to remove it from list\n if employee_index >= 0:\n EMPLOYEES.pop(employee_index)\n\n\ndef update_employee(id, new_employee):\n \"\"\"overwrites db employee\"\"\"\n with sqlite3.connect(\"./kennel.sqlite3\") as conn:\n db_cursor = conn.cursor()\n\n db_cursor.execute(\n \"\"\"\n UPDATE Employee\n SET\n name = ?,\n address = ?,\n location_id = ?\n WHERE id = ?\n \"\"\",\n (\n new_employee[\"name\"],\n new_employee[\"address\"],\n new_employee[\"locationId\"],\n id,\n ),\n )\n\n # Were any rows affected?\n # Did the client send an `id` that exists?\n rows_affected = db_cursor.rowcount\n\n if rows_affected == 0:\n # Forces 404 response by main module\n return False\n else:\n # Forces 204 response by main module\n return True\n","repo_name":"JohnMDoll/kennels-server","sub_path":"views/employee_requests.py","file_name":"employee_requests.py","file_ext":"py","file_size_in_byte":6057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72974063147","text":"\n\nfrom __future__ import unicode_literals\n\nimport frappe, json\nfrom frappe import _dict\nimport frappe.share\nfrom frappe.utils import cint\nfrom frappe.boot import get_allowed_reports\nfrom frappe.permissions import get_roles, get_valid_perms \nfrom frappe.student import *\nfrom frappe.core.doctype.domain_settings.domain_settings import get_active_modules\nfrom frappe.instructor import *\nfrom frappe.general import *\n\nno_cache = 1\n\ndef get_context(context):\n\tuser = frappe.session.user\n\tif user==\"Guest\":\n\t\tcontext.guest='true'\n\t\tfrappe.local.flags.redirect_location = \"/E-learning\"\n\t\traise frappe.Redirect\n\tcontext.guest='false'\n\tcontext.term=get_current_term()\n\t\n\t\n\t\n\t\n\tif is_student(user):\n\t\tcontext.heis=\"student\"\n\t\tcontext.classes=get_student_courses()\n\t\tcontext.classes.sort()\n\t\tgroup=get_student_groupe()\n\t\tcontext.group=group[:-8]\n\t\tcontext.currentclass=context.classes[0]\n\t\t\n\t\tcontext.chat=chat(group,context.currentclass,context.term,'first')\n\t\tcontext.max=maxx(group,context.currentclass,context.term)\n\t\t\n\telif is_instructor(user):\n\t\tcontext.heis=\"instructor\"\n\t\tcontext.classes=get_classescourses()\n\t\t\n\t\tcontext.classes.sort()\n\t\tcontext.currentclass=context.classes[0]\n\t\tgetmeetings()\n\t\tcontext.name=get_instructorname()\n\t\tuser = frappe.session.user\n\t\tgroup,course=context.currentclass.split('_')\n\t\tgroup=group+'-'+context.term[:8]\n\t\t\t\n\t\tcontext.chat=chat(group,course,context.term,'first')\n\t\tcontext.max=maxx(group,course,context.term)\n\telse:\t\n\t\tfrappe.local.flags.redirect_location = \"/E-learning\"\n\t\traise frappe.Redirect\n\t\n","repo_name":"bahaou/pages","sub_path":"online.py","file_name":"online.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25864442282","text":"import exceptions\nimport xmlrpclib\nimport os\nimport traceback\nimport string\nimport sys\nimport time\nimport urlgrabber\nimport yaml # cobbler packaged version\n\ndef log_exc(apache):\n \"\"\"\n Log active traceback to logfile.\n \"\"\"\n (t, v, tb) = sys.exc_info()\n apache.log_error(\"Exception occured: %s\" % t )\n apache.log_error(\"Exception value: %s\" % v)\n apache.log_error(\"Exception Info:\\n%s\" % string.join(traceback.format_list(traceback.extract_tb(tb))))\n\nclass CobblerSvc(object):\n \"\"\"\n 
Interesting mod python functions are all keyed off the parameter\n mode, which defaults to index. All options are passed\n as parameters into the function.\n \"\"\"\n def __init__(self, server=None, apache=None, req=None):\n self.server = server\n self.apache = apache\n self.remote = None\n self.req = req\n\n def __xmlrpc_setup(self):\n \"\"\"\n Sets up the connection to the Cobbler XMLRPC server. \n This is the version that does not require logins.\n \"\"\"\n if self.remote is None:\n self.remote = xmlrpclib.Server(self.server, allow_none=True)\n self.remote.update()\n\n def index(self,**args):\n return \"no mode specified\"\n\n def debug(self,profile=None,**rest):\n # the purpose of this method could change at any time\n # and is intented for temporary test code only, don't rely on it\n self.__xmlrpc_setup()\n return self.remote.get_repos_compatible_with_profile(profile)\n\n def ks(self,profile=None,system=None,REMOTE_ADDR=None,REMOTE_MAC=None,**rest):\n \"\"\"\n Generate kickstart files...\n \"\"\"\n self.__xmlrpc_setup()\n data = self.remote.generate_kickstart(profile,system,REMOTE_ADDR,REMOTE_MAC)\n return u\"%s\" % data \n\n def yum(self,profile=None,system=None,**rest):\n self.__xmlrpc_setup()\n if profile is not None:\n data = self.remote.get_repo_config_for_profile(profile)\n elif system is not None:\n data = self.remote.get_repo_config_for_system(system)\n else:\n data = \"# must specify profile or system name\"\n return data\n\n def trig(self,mode=\"?\",profile=None,system=None,REMOTE_ADDR=None,**rest):\n \"\"\"\n Hook to call install triggers.\n \"\"\"\n self.__xmlrpc_setup()\n ip = REMOTE_ADDR\n if profile:\n rc = self.remote.run_install_triggers(mode,\"profile\",profile,ip)\n else:\n rc = self.remote.run_install_triggers(mode,\"system\",system,ip)\n return str(rc)\n\n def nopxe(self,system=None,**rest):\n self.__xmlrpc_setup()\n return str(self.remote.disable_netboot(system))\n\n def list(self,what=\"systems\",**rest):\n self.__xmlrpc_setup()\n buf = \"\"\n if what == \"systems\":\n listing = self.remote.get_systems()\n elif what == \"profiles\":\n listing = self.remote.get_profiles()\n elif what == \"distros\":\n listing = self.remote.get_distros()\n elif what == \"images\":\n listing = self.remote.get_images()\n elif what == \"repos\":\n listing = self.remote.get_repos()\n else:\n return \"?\"\n for x in listing:\n buf = buf + \"%s\\n\" % x[\"name\"]\n return buf\n\n def autodetect(self,**rest):\n self.__xmlrpc_setup()\n systems = self.remote.get_systems()\n\n # if kssendmac was in the kernel options line, see\n # if a system can be found matching the MAC address. 
This\n        # is more specific than an IP match.\n\n        macinput = rest[\"REMOTE_MAC\"]\n        if macinput is not None:\n            # FIXME: will not key off other NICs, problem?\n            mac = macinput.split()[1].strip()\n        else:\n            mac = \"None\"\n\n        ip = rest[\"REMOTE_ADDR\"]\n\n        candidates = []\n\n        for x in systems:\n            for y in x[\"interfaces\"]:\n                if x[\"interfaces\"][y][\"mac_address\"].lower() == mac.lower():\n                    candidates.append(x)\n\n        if len(candidates) == 0:\n            for x in systems:\n                for y in x[\"interfaces\"]:\n                    if x[\"interfaces\"][y][\"ip_address\"] == ip:\n                        candidates.append(x)\n\n        if len(candidates) == 0:\n            return \"FAILED: no match (%s,%s)\" % (ip, macinput)\n        elif len(candidates) > 1:\n            return \"FAILED: multiple matches\"\n        elif len(candidates) == 1:\n            return candidates[0][\"name\"]\n\n    def look(self,**rest):\n        # debug only\n        return repr(rest)\n\n    def findks(self,system=None,profile=None,**rest):\n        self.__xmlrpc_setup()\n\n        # strip the scheme and API path to get the host segment\n        serverseg = self.server.replace(\"http://\",\"\")\n        serverseg = serverseg.replace(\"/cobbler_api\",\"\")\n\n        name = \"?\"\n        type = \"system\"\n        if system is not None:\n            name = system\n            url = \"%s/cblr/svc/op/ks/system/%s\" % (serverseg, name)\n        elif profile is not None:\n            name = profile\n            url = \"%s/cblr/svc/op/ks/profile/%s\" % (serverseg, name)\n        else:\n            name = self.autodetect(**rest)\n            if name.startswith(\"FAILED\"):\n                return \"# autodetection %s\" % name\n            url = \"%s/cblr/svc/op/ks/system/%s\" % (serverseg, name)\n\n        try:\n            return urlgrabber.urlread(url)\n        except:\n            return \"# kickstart retrieval failed (%s)\" % url\n\n    def puppet(self,hostname=None,**rest):\n        self.__xmlrpc_setup()\n\n        if hostname is None:\n            return \"hostname is required\"\n\n        results = self.remote.find_system_by_hostname(hostname)\n\n        classes = results.get(\"mgmt_classes\", {})\n        params = results.get(\"mgmt_parameters\",[])\n\n        newdata = {\n            \"classes\" : classes,\n            \"parameters\" : params\n        }\n\n        return yaml.dump(newdata)\n\n","repo_name":"remotesyssupport/cobbler-template-files","sub_path":"cobbler/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":5861,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"16210062030","text":"import time\n\nfrom peas.sensors import ArduinoSerialMonitor\n\n\ndef main(loop=True, delay=1., verbose=False):\n    # Weather object\n    monitor = ArduinoSerialMonitor(auto_detect=False)\n\n    while True:\n        data = monitor.capture()\n\n        if verbose and len(data.keys()) > 0:\n            print(data)\n\n        if not loop:\n            break\n\n        time.sleep(delay)\n\n\nif __name__ == '__main__':\n    import argparse\n\n    # Get the command line option\n    parser = argparse.ArgumentParser(description=\"Read sensor data from arduinos\")\n\n    parser.add_argument('--loop', action='store_true', default=True,\n                        help=\"If should keep reading, defaults to True\")\n    parser.add_argument(\"-d\", \"--delay\", dest=\"delay\", default=1.0, type=float,\n                        help=\"Interval to read sensors\")\n    parser.add_argument('-v', '--verbose', action='store_true', default=False,\n                        help=\"Print results to stdout\")\n    args = parser.parse_args()\n\n    main(**vars(args))\n","repo_name":"panoptes/PEAS","sub_path":"scripts/simple_sensors_capture.py","file_name":"simple_sensors_capture.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"71960917227","text":"from gpiozero import MotionSensor\nimport time\nimport telepot\nimport os\nimport tweepy\nfrom dotenv import load_dotenv\nfrom pathlib import 
Path\nfrom picamera import PiCamera\nfrom blur_face import muka_blur\n\ncamera = PiCamera()\npath = Path(\".\") / '.env'\nload_dotenv(dotenv_path=path)\npir = MotionSensor(4)\n\n# variables for accessing the Twitter API\nconsumer_key=os.getenv(\"CONSUMER_KEY\")\nconsumer_secret_key=os.getenv(\"CONSUMER_SECRET_KEY\")\naccess_token=os.getenv(\"ACCESS_TOKEN\")\naccess_token_secret=os.getenv(\"ACCESS_TOKEN_SECRET\")\nbot_id = os.getenv('TELEGRAM_BOTID')\nbot = telepot.Bot(bot_id)\nchat_id = 674827444\nbot.getMe()\ndetected = False\n\nif __name__ == \"__main__\":\n    auth=tweepy.OAuthHandler(consumer_key,consumer_secret_key)\n    auth.set_access_token(access_token,access_token_secret)\n    api=tweepy.API(auth)\n    while True:\n        if pir.motion_detected and detected:\n            print('sending...')\n            detected = False\n            tweet_text = \"Park Rule Violator\"\n            image_path = '/home/pi/captured.jpg'\n            time_start = time.time()\n            camera.capture(image_path)\n            time_capture = round(time.time() - time_start, 2)\n            print(f\"time to capture : {time_capture} second\")\n            time_start = time.time()\n            blur_path = muka_blur(image_path)\n            time_toblur = round(time.time() - time_start, 2)\n            print(f\"time to blur : {time_toblur} second\")\n            time_start = time.time()\n            bot.sendPhoto(chat_id, photo=open(blur_path, 'rb'))\n            time_totelegram = round(time.time() - time_start, 2)\n            print(f\"time to telegram : {time_totelegram} second\")\n            time_start = time.time()\n            # bot.sendMessage(chat_id, tweet_text)\n            api.update_with_media(blur_path, tweet_text)\n            time_totwitter = round(time.time() - time_start, 2)\n            print(f\"time to twitter : {time_totwitter} second\")\n            # api.update_status(status=tweet_text)\n            print('done.')\n            time_total = round((time_toblur + time_totwitter + time_totelegram + time_capture), 2)\n            print(f\"time to done : {time_total} second\")\n            time.sleep(3)\n        elif not detected:\n            detected = True\n            print('no detection')\n            # time.sleep(1)","repo_name":"hadyanadam/parkranger-security-camera","sub_path":"status_tweepy.py","file_name":"status_tweepy.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"24806669782","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nVOLUME OF A SPHERE\r\n(4/3) * pi * R^3. 
Consider (assign) the value 3.14159 for pi.\r\n@author: psgei\r\n\"\"\"\r\n\r\nr = float(input())\r\npi = 3.14159\r\nv = (4.0/3) * pi * r ** 3\r\n\r\nprint('VOLUME = {:.3f}'.format(v))\r\n","repo_name":"psgeisa/python_desafios","sub_path":"011 - VOLUME DE UMA ESFERA.py","file_name":"011 - VOLUME DE UMA ESFERA.py","file_ext":"py","file_size_in_byte":239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15265874722","text":"from math import sqrt\n\n\ndef closestStraightCity(c, x, y, q):\n    # Write your code here\n    cities_coordinates = {}\n    for i in range(len(c)):\n        shortest_distance = [float(\"inf\"), None]\n        for j in range(len(c)):\n            if i == j:\n                continue\n            distance = get_distances(x[i], y[i], x[j], y[j])\n            if shortest_distance[0] > distance:\n                shortest_distance = [distance, c[j]]\n        cities_coordinates[c[i]] = shortest_distance[1]\n    string_array = []\n    for query in q:\n        string_array.append(cities_coordinates[query])\n\n    return string_array\n\n\ndef get_distances(x1, y1, x2, y2):\n    if x1 == x2:\n        return abs(y1 - y2)\n    elif y1 == y2:\n        return abs(x1 - x2)\n    else:\n        return float(\"inf\")\n\n\nprint(\n    closestStraightCity(\n        [\"fastcity\", \"bigbanana\", \"xyz\"],\n        [23, 23, 23],\n        [1, 10, 20],\n        [\"fastcity\", \"bigbanana\", \"xyz\"],\n    )\n)\n","repo_name":"chrisuzor/PersonalProjects","sub_path":"HackerRank/task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12431513176","text":"list1 = ['_', '_', '_', '_', '_', '_', '_', '_', '_']\n\ndef error():\n    print('Invalid value!')\n\ndef error2():\n    print('That square is already taken!')\n\ndef test_input(a):\n    try:\n        a = int(a)\n    except:\n        return False\n    if a > 3 or a < 1:\n        return False\n    else: return True\n\ndef test_gajiens(b):\n    if list1[b] == '_':\n        return True\n\ndef uzvaretajs(e):\n    print('Player ' + e + ' won!')\n\ndef test_uzvara1(c):\n    if list1[0] == list1[1] == list1[2] == c: return True\n    elif list1[3] == list1[4] == list1[5] == c: return True\n    elif list1[6] == list1[7] == list1[8] == c: return True\n    elif list1[0] == list1[3] == list1[6] == c: return True\n    elif list1[1] == list1[4] == list1[7] == c: return True\n    elif list1[2] == list1[5] == list1[8] == c: return True\n    elif list1[0] == list1[4] == list1[8] == c: return True\n    elif list1[2] == list1[4] == list1[6] == c: return True\n\ndef printet():\n    print(list1[0],list1[1],list1[2])\n    print(list1[3],list1[4],list1[5])\n    print(list1[6],list1[7],list1[8])\n\ndef main_funkcija():\n\n    printet()\n\n    for i in range(1,10):\n        print('---------------')\n        if i % 2 == 1:\n            print('Move for player X')\n            d = 'X'\n            while d == 'X':\n                while d == 'X':\n                    line1 = input('Enter a row (1-3):')\n                    if test_input(line1):\n                        break\n                    error()\n\n                while d == 'X':\n                    kolona1 = input('Enter a column (1-3):')\n                    if test_input(kolona1):\n                        break\n                    error()\n\n                gajiens = (int(line1) - 1) * 3 + int(kolona1) - 1\n                if test_gajiens(gajiens):\n                    break\n                error2()\n\n            list1[gajiens] = d\n            if test_uzvara1(d):\n                uzvaretajs(d)\n                break\n            printet()\n\n        else:\n            print('Move for player 0')\n            d = '0'\n            while d == '0':\n                while d == '0':\n                    line1 = input('Enter a row (1-3):')\n                    if test_input(line1):\n                        break\n                    error()\n\n                while d == '0':\n                    kolona1 = input('Enter a column (1-3):')\n                    if test_input(kolona1):\n                        break\n                    error()\n\n
                gajiens = (int(line1) - 1) * 3 + int(kolona1) - 1\n                if test_gajiens(gajiens):\n                    break\n                error2()\n\n            list1[gajiens] = d\n\n\n            if test_uzvara1(d):\n                uzvaretajs(d)\n                break\n            printet()\n        if i == 9:\n            print('The game ended in a draw!')\n\nif __name__ == '__main__':\n    main_funkcija()\n","repo_name":"robertspauls/DA-kurss","sub_path":"python-spele.py","file_name":"python-spele.py","file_ext":"py","file_size_in_byte":3022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6949204411","text":"import pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\ndf = pd.read_csv('selection_C++.txt',sep = \",\",names=['Size','Time'])\r\ndf1 = pd.read_csv('selection_Java.txt',sep = \",\",names=['Size','Time'])\r\ndf2 = pd.read_csv('selection_Python.txt',sep = \",\",names=['Size','Time'])\r\nx=df.iloc[:,0]\r\ny=df.iloc[:,1]\r\ny1 = df1.iloc[:,1]\r\ny2 = df2.iloc[:,1]\r\n\r\n##########################################################\r\nplt.plot(x,y,color=\"green\",label=\"Selection Sort C++\")\r\nplt.plot(x,y1,color=\"red\",label=\"Selection Sort Java\")\r\nplt.plot(x,y2, color=\"yellow\", label=\"Selection Sort Python\")\r\n\r\n##########################################################\r\nplt.xlabel('Size')\r\nplt.ylabel('Time(ms)')\r\nplt.title('Selection Sort Versus')\r\nplt.legend()\r\nplt.show()","repo_name":"BraderLh/EDA","sub_path":"python Sorts/versusselection.py","file_name":"versusselection.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35195742995","text":"from flask import Blueprint, render_template, request, Response\nimport json\nimport plotly, plotly.graph_objs\nfrom model.stock_data_getter.stock_data_getter import get_daily\n\nstock_viewer_blueprint = Blueprint('stock-viewer', __name__)\n\n@stock_viewer_blueprint.route('/stock-viewer/')\ndef index():\n    return render_template('stock_viewer.html')\n\n@stock_viewer_blueprint.route('/stock_data/', methods=['GET'])\ndef get_symbol_daily():\n    symbol = request.args.get('symbol')\n    graph_format = request.args.get('format')\n    compact = True if request.args.get('compact') == 'true' or request.args.get('compact') is None else False\n    if symbol and graph_format:\n        data = get_daily(symbol)\n        if data is not None:\n            if graph_format == 'line':\n                dates = list(data.keys()) if not compact else list(data.keys())[:100]\n                values = [elem['4. 
close'] for elem in data.values()]\n if compact:\n values = values[:100]\n\n trace = plotly.graph_objs.Scatter(\n x = dates,\n y = values,\n mode = 'lines'\n )\n return Response(json.dumps([trace], cls=plotly.utils.PlotlyJSONEncoder), mimetype='application/json')\n else:\n return Response(\"Symbol not found\", status=404)\n else:\n return Response(\"Error: symbol or graph_format empty\", status=500)","repo_name":"Jorjatorz/stock-model","sub_path":"blueprints/stock_viewer.py","file_name":"stock_viewer.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42020453316","text":"import numpy as np\n\nfrom src.keypoint_alignment.converters.keypoints_converter import KeypointMapper\n\n\nclass FaceKeypoint28To4Mapper(KeypointMapper):\n def convert_points(self, points):\n # From stylegan mapping\n face_landmarks = points\n lm = np.array(face_landmarks)\n\n lm_eye_left = lm[11: 17] # left-clockwise\n lm_eye_right = lm[17: 23] # left-clockwise\n\n\n eye_left = np.mean(lm_eye_left, axis=0)\n eye_right = np.mean(lm_eye_right, axis=0)\n mouth_left = lm[24]\n mouth_right = lm[26]\n\n out_dict = {'eye_left': eye_left,\n 'eye_right': eye_right,\n 'mouth_left': mouth_left,\n 'mouth_right': mouth_right}\n return out_dict\n","repo_name":"MalchuL/StyleTransferDatasetPreparation","sub_path":"src/keypoint_alignment/converters/face/kps_28_to_4.py","file_name":"kps_28_to_4.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72996199468","text":"from django.contrib import admin\nfrom .models import *\nfrom django.contrib.admin import ModelAdmin\nfrom chat.base.setup import CHAT_DEBUG\n\n\n@admin.register(Message)\nclass MessageAdmin(ModelAdmin):\n list_display = [\n \"text\",\n \"sender\",\n \"receiver\",\n \"chat_id\",\n \"created_at\",\n \"edited\",\n \"seen\",\n \"id\"\n ]\n\n\n@admin.register(ClientSession)\nclass ClientSessionAdmin(ModelAdmin):\n list_display = [\"user\", \"expires_at\"]\n if CHAT_DEBUG:\n list_display += [\"secret\"]\n list_display += [\"id\"]\n\n readonly_fields = [\"secret\"]\n\n\n@admin.register(Participation)\nclass ParticipationAdmin(ModelAdmin):\n list_display = [\"user\", \"chat\"]\n search_fields = [\"user\"]\n\n\nclass ParticipationTabular(admin.TabularInline):\n model = Participation\n extra = 0\n\n\n@admin.register(Chat)\nclass ChatAdmin(ModelAdmin):\n inlines = [ParticipationTabular]\n","repo_name":"AzikDeveloper/django-channels-chat","sub_path":"chat/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"43926328914","text":"import socket\nimport json\n\nclass Commands:\n address = None\n port = None\n isConnected = False\n \n def __init__(self, client, bufferSize):\n self.bufferSize = bufferSize\n self.client = client\n self.commandList = ['join', 'leave', 'msg', 'register', 'all', '?']\n \n def sendJsonMessage(self, jsonMessage, serverAddress):\n try:\n jsonString = json.dumps(jsonMessage)\n self.client.sendto(jsonString.encode(), serverAddress)\n except Exception as e: print(f\"[Error] {e}\")\n \n return\n \n def tokenizeCommandString(self, commandString):\n if(not commandString): return None\n if(commandString[0] == '/'):\n return commandString[1:].split(' ')\n else:\n print(\"[Error] Invalid command! 
Start commands with a /\") \n \n def checkParams(self, command, numParams, expectedParams):\n if command == \"msg\" and numParams > 2:\n return True\n if numParams == expectedParams:\n return True\n print(f\"[Error] Expected {expectedParams} params for command [{command}], but {numParams} were given\")\n return False\n\n def commandSwitch(self, command):\n if command == None:\n return\n \n if command[0] not in self.commandList:\n print(\"[Error] Invalid command!\")\n return\n \n action = command[0]\n parameters = command[1:]\n \n if action == \"join\":\n if(self.checkParams(\"join\", len(parameters), 2)):\n self.joinCommand(parameters[0], parameters[1])\n return\n \n if action == \"?\":\n self.commandHelp()\n return \n \n if Commands.isConnected:\n \n if action == \"leave\":\n if(self.checkParams(\"leave\", len(parameters), 0)):\n self.leaveCommand()\n return\n \n if action == \"msg\":\n parameters = len(command) - 1\n \n if(self.checkParams(\"msg\", parameters, 2)):\n receiverName = command[1]\n message = ' '.join(command[2:])\n self.msgCommand(receiverName, message)\n \n if action == \"register\":\n if(self.checkParams(\"register\", len(parameters), 1)):\n self.registerCommand(parameters[0])\n return\n \n if action == \"all\":\n message = ' '.join(command[1:])\n self.allCommand(message)\n \n else:\n print(\"[Error] Connect to a server first!\")\n\n return\n \n def joinCommand(self, address, port):\n try:\n destinationServer = (address, int(port))\n except Exception as error:\n print(\"[Error] Invalid address port\")\n return\n \n \n try:\n jsonMessage = {\n \"command\": \"join\",\n }\n self.client.settimeout(5)\n\n self.sendJsonMessage(jsonMessage, destinationServer)\n message, senderAddress = self.receiveFromServer()\n message = message.decode()\n messageJson = json.loads(message)\n printMessage = messageJson[\"message\"]\n print(f\"{printMessage}\")\n \n Commands.address = address\n Commands.port = int(port)\n Commands.isConnected = True\n \n except socket.error as error:\n print(\"[Connection Error] Server does not exist\")\n \n return\n\n def leaveCommand(self):\n \n jsonMessage = {\n \"command\" : \"leave\",\n }\n self.sendJsonMessage(jsonMessage, (Commands.address, Commands.port))\n Commands.address = None\n Commands.port = None\n Commands.isConnected = False\n \n \n def msgCommand(self, messageReceiver, message):\n jsonMessage = {\n \"command\" : \"msg\",\n \"handle\" : messageReceiver,\n \"message\" : message,\n }\n self.sendJsonMessage(jsonMessage, (Commands.address, Commands.port))\n return\n \n def registerCommand(self, handle):\n jsonMessage = {\n \"command\" : \"register\",\n \"handle\" : handle\n }\n self.sendJsonMessage(jsonMessage, (Commands.address, Commands.port))\n return\n\n def allCommand(self, message):\n jsonMessage = {\n \"command\" : \"all\",\n \"message\" : f\"{message}\"\n }\n try:\n self.sendJsonMessage(jsonMessage, (Commands.address, Commands.port))\n except Exception as error:\n print(f\"[Message Send Error] {error}\")\n return\n\n def receiveFromServer(self):\n message, address = self.client.recvfrom(self.bufferSize)\n return message, address\n \n def threadRecvFromServer(self):\n while True:\n try:\n message, address = self.receiveFromServer()\n message = message.decode()\n messageJson = json.loads(message)\n printMessage = messageJson[\"message\"]\n print(f\"{printMessage}\")\n except:\n continue\n \n \n def commandHelp(self):\n print(\"\"\" +==========================+=============================================+======================+\n | Command | 
Description                                 | Sample Usage         |\n            +==========================+=============================================+======================+\n            | /join                    | Connect to a server with IP and Port        | /join 127.0.0.1 5000 |\n            +--------------------------+---------------------------------------------+----------------------+\n            | /leave                   | Disconnect from the current server          | /leave               |\n            +--------------------------+---------------------------------------------+----------------------+\n            | /register                | Register a unique handle or alias           | /register Milize     |\n            +--------------------------+---------------------------------------------+----------------------+\n            | /all                     | Send a message to all users                 | /all I have arrived! |\n            +--------------------------+---------------------------------------------+----------------------+\n            | /msg                     | Send a message to a specific user or handle | /msg Milize G valo?  |\n            +--------------------------+---------------------------------------------+----------------------+\n            | /?                       | Display this menu                           | /?                   |\n            +--------------------------+---------------------------------------------+----------------------+\"\"\")\n    \n\n","repo_name":"ethansaqui/CSNETWK-MP","sub_path":"client/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":6920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42268732157","text":"\nclass Jugador():\n\n\tdef __init__(self, equipo, equipoContrario):\n\n\t\tself.equipo = equipo\n\t\tself.equipoContrario = equipoContrario\n\t\tself.movimiento = 0\n\t\tself.tiempo = 0\n\t\tself.check = False\n\t\tself.PIEZAS_CAPTURADAS = []\n\n\t\tpass\n\n\t# Método para introducir pieza capturada en el tablero\n\t@staticmethod\n\tdef introducirPieza(jugador, tablero):\n\n\t\t# Variable para evaluar si el movimiento es válido\n\t\tmovimientoValido = False\n\t\t# Variable para contar intentos de movimientos erróneos\n\t\tcontadorMovimientoInvalido = 0\n\n\t\tprimerEv = False\n\t\tsegundaEv = False\n\n\t\twhile movimientoValido != True and contadorMovimientoInvalido < 2:\n\n\t\t\tif(int(len(jugador.PIEZAS_CAPTURADAS)) == 0):\n\t\t\t\treturn False\n\n\t\t\tindexPieza = int(input('Ingrese el número de la pieza a introducir en el tablero\\n'))\n\t\t\t# jugador, tablero, indexPieza\n\t\t\t# Evaluamos que la ubicación en el tablero esté desocupada\n\t\t\txy = input(\"Ingrese el lugar donde quiere ingresar la pieza (fila, columna separadas por espacio)\\n\")\n\t\t\tx = int(xy[0])\n\t\t\ty = int(xy[2])\n\n\t\t\t# Obtenemos la pieza de la lista de piezas capturadas\n\t\t\tnewPieza = jugador.__dict__['PIEZAS_CAPTURADAS'][indexPieza]\n\n\t\t\tif(tablero[x][y] != \" \"):\n\t\t\t\treturn False\n\n\t\t\t# Evaluamos si hay un peon de nuestro equipo en esa columna (solo en caso de que queramos insertar un peón)\n\t\t\tif(newPieza.tipo == 'Peón'):\n\t\t\t\tisAPeon = False\n\n\t\t\t\tfor i in range(0,9):\n\t\t\t\t\tif(tablero[i][y] != ' '):\n\t\t\t\t\t\tif(tablero[i][y].team == jugador.equipo and tablero[i][y].tipo == 'Peón'):\n\t\t\t\t\t\t\tisAPeon = True\n\t\t\t\t\t\t\tcontinue\n\n\t\t\t\tif(isAPeon == True):\n\t\t\t\t\ttry:\n\t\t\t\t\t\tproblema = input('Hay al menos un peon ubicado en esta columna. No puede introducir un peón. 
Desea elegir otra pieza o cambiar el movimiento (presione 1 o 2 respectivamente)')\n\t\t\t\t\texcept Exception:\n\t\t\t\t\t\tproblema = input('INGRESE UN VALOR VÁLIDO \\n')\n\t\t\t\t\tif(int(problema) == 1):\n\t\t\t\t\t\tcontadorMovimientoInvalido = contadorMovimientoInvalido + 1\n\t\t\t\t\t\tmovimientoValido = False\n\t\t\t\t\t\tcontinue\n\t\t\t\t\telif(int(problema)) == 2:\n\t\t\t\t\t\treturn False\n\t\t\t\telse:\n\t\t\t\t\tprimerEv = True\n\n\n\t\t\t\t# Peones y lanceros no pueden ser introducidos en la ultima fila\n\t\t\t\tif((newPieza.tipo == \"Peón\" and primerEv == True) or newPieza.tipo == \"Lancero\"):\n\t\t\t\t\tif(jugador.equipo == \"blanco\" and x == 9):\n\t\t\t\t\t\t_problema = input('La pieza seleccionada no puede ser introducida en la ubicación deseada. Desea elegir otra pieza o cambiar el movimiento (presione 1 o 2 respectivamente)')\n\t\t\t\t\t\tif(int(_problema) == 1):\n\t\t\t\t\t\t\tcontadorMovimientoInvalido = contadorMovimientoInvalido + 1\n\t\t\t\t\t\t\tmovimientoValido = False\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\telif(int(_problema)) == 2:\n\t\t\t\t\t\t\treturn False\n\t\t\t\t\telif(jugador.equipo == \"negro\" and x == 1):\n\t\t\t\t\t\t_problema = input('La pieza seleccionada no puede ser introducida en la ubicación deseada. Desea elegir otra pieza o cambiar el movimiento (presione 1 o 2 respectivamente)')\n\t\t\t\t\t\tif(int(_problema) == 1):\n\t\t\t\t\t\t\tcontadorMovimientoInvalido = contadorMovimientoInvalido + 1\n\t\t\t\t\t\t\tmovimientoValido = False\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\telif(int(_problema)) == 2:\n\t\t\t\t\t\t\treturn False\n\t\t\t\t\telse:\n\t\t\t\t\t\tsegundaEv = True\n\n\t\t\t\t# Los caballos no pueden ingresarse ni en la ultima ni penúltima fila\n\t\t\t\tif(newPieza.tipo == \"Caballo\"):\n\t\t\t\t\tif(jugador.equipo == \"blanco\" and x >= 8):\n\t\t\t\t\t\t_problema = input('La pieza seleccionada no puede ser introducida en la ubicación deseada. Desea elegir otra pieza o cambiar el movimiento (presione 1 o 2 respectivamente)')\n\t\t\t\t\t\tif(int(_problema) == 1):\n\t\t\t\t\t\t\tcontadorMovimientoInvalido = contadorMovimientoInvalido + 1\n\t\t\t\t\t\t\tmovimientoValido = False\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\telif(int(_problema)) == 2:\n\t\t\t\t\t\t\treturn False\n\t\t\t\t\telif(jugador.equipo == \"negro\" and x <= 2):\n\t\t\t\t\t\t_problema = input('La pieza seleccionada no puede ser introducida en la ubicación deseada. Desea elegir otra pieza o cambiar el movimiento (presione 1 o 2 respectivamente)')\n\t\t\t\t\t\tif(int(_problema) == 1):\n\t\t\t\t\t\t\tcontadorMovimientoInvalido = contadorMovimientoInvalido + 1\n\t\t\t\t\t\t\tmovimientoValido = False\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\telif(int(_problema)) == 2:\n\t\t\t\t\t\t\treturn False\n\t\t\t\t\telse:\n\t\t\t\t\t\tsegundaEv = True\n\t\t\t\t# Las piezas al ingresar no se coronan, hay que esperar un turno para coronarlas\n\t\t\tif(primerEv == True and segundaEv == True):\n\t\t\t\tmovimientoValido = True\n\n\t\t\tif(movimientoValido == True):\n\t\t\t\tjugador.__dict__['PIEZAS_CAPTURADAS'].pop(indexPieza)\n\t\t\t\t# Cambiamos el equipo de la pieza capturada (al reproducir nuevamente el tablero aparecerá con el símbolo que corresponde)\n\t\t\t\tnewPieza.cambiarEquipo(jugador.__dict__['equipo'])\n\t\t\t\t# Si la pieza se corona, le ponemos en falso este campo\n\t\t\t\tif(newPieza.__dict__['corona'] == True):\n\t\t\t\t\tnewPieza.__dict__['coronado'] = False\n\t\t\t\t# Retornamos el tablero. 
Deberemos inicializarlo nuevamente.\n\t\t\t\ttablero[x][y] = newPieza\n\n\t\t\t\treturn tablero\n\n","repo_name":"andres925922/EDA4.0","sub_path":"jugadores/jugador.py","file_name":"jugador.py","file_ext":"py","file_size_in_byte":4805,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3508070173","text":"# ######################## EXAMPLE USAGE###########################\n# import torch\n# from PIL import Image\n# import torch.nn as nn\n# import torchvision\n# from torchvision import models, transforms\n# import numpy as np\n\n# # def your image path\n# image_path = ''\n\n# # def your model path can be relative can be apsolute\n# model_path = ''\n\n# # def transforms\n# transforms = transforms.Compose([\n# transforms.Resize(224),\n# transforms.ToTensor(),\n# transforms.Normalize([0.485, 0.456, 0.406],\n# [0.229, 0.224, 0.225])\n# ])\n\n# # check avalible device\n# device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n# # load model\n# model = models.resnet50(pretrained=False).to(device)\n# for param in model.parameters():\n# param.requires_grad = False\n# model.fc = nn.Sequential(\n# nn.Linear(2048, 128),\n# nn.ReLU(inplace=True),\n# nn.Linear(128, 2)).to(device)\n# model.load_state_dict(torch.load(model_path), strict=False)\n# model = model.to(device)\n# model.eval()\n\n# # laod image\n# image = Image.open(image_path)\n# image_tensor = transforms(image)\n# image_tensor = image_tensor.unsqueeze(0)\n# image_tensor = image_tensor.to(device)\n# logps = model(image_tensor)\n# ps = torch.exp(logps)\n# top_p, top_class = ps.topk(1, dim=1)\n# index = top_class.item()\n\n# ############################# INPUT #############################\n# image size: (224, 244, 3)\n\n# ############################ OUTPUT #############################\n# int value: 0 or 1\n\nfrom pathlib import Path\nfrom typing import Optional, Tuple\n\nimport numpy as np\nimport torch\nimport albumentations as A\nfrom albumentations.pytorch import ToTensorV2\nimport cv2\n\nfrom kjn_face_id_system.id_card_classification_multi_shot.classification_universal_trainscript import (\n UniversaClassificationTrainer,\n)\n\n\nclass IdCardClassificator:\n WEIGHT_PATH = Path(\"models/id_card_classification_multi_shot/models/resnet50.pth\")\n CLASS_NAME_TO_IDX = {\n \"albanian_front\": 0,\n \"argentine_front\": 1,\n \"austria_back\": 2,\n \"austria_front\": 3,\n \"belgium_back\": 4,\n \"belgium_front\": 5,\n \"bosnian_front\": 6,\n \"bulgaria_back\": 7,\n \"bulgaria_front\": 8,\n \"chile_front\": 9,\n \"croatia_back\": 10,\n \"croatia_front\": 11,\n \"czech_back\": 12,\n \"czech_front\": 13,\n \"estonia_back\": 14,\n \"estonia_front\": 15,\n \"finland_back\": 16,\n \"finland_front\": 17,\n \"france_back\": 18,\n \"france_front\": 19,\n \"germany_back\": 20,\n \"germany_front\": 21,\n \"hungary_type_1_back\": 22,\n \"hungary_type_1_front\": 23,\n \"hungary_type_2_front\": 24,\n \"indonesian_front\": 25,\n \"israel_front\": 26,\n \"italian_back\": 27,\n \"italian_front\": 28,\n \"latvia_back\": 29,\n \"latvia_front\": 30,\n \"liechtenstein_back\": 31,\n \"liechtenstein_front\": 32,\n \"lithuania_back\": 33,\n \"lithuania_front\": 34,\n \"macau_back\": 35,\n \"macau_front\": 36,\n \"malta_front\": 37,\n \"norway_back\": 38,\n \"norway_front\": 39,\n \"poland_type_1_front\": 40,\n \"poland_type_2_front\": 41,\n \"poland_type_3_front\": 42,\n \"poland_type_4_back\": 43,\n \"poland_type_4_front\": 44,\n \"portugal_front\": 45,\n \"romania_back\": 46,\n 
\"romania_front\": 47,\n        \"serbian_front\": 48,\n        \"slovakia_back\": 49,\n        \"slovakia_front\": 50,\n        \"south_africa_front\": 51,\n        \"sweden_back\": 52,\n        \"sweden_front\": 53,\n        \"turkish_front\": 54,\n        \"ukrainian_front\": 55,\n        \"uruguay_front\": 56,\n        \"usa_passport_back\": 57,\n        \"usa_passport_front\": 58,\n        \"venezuela_front\": 59,\n    }\n    IDX_TO_CLASS_NAME = {i: c for c, i in CLASS_NAME_TO_IDX.items()}\n\n    def __init__(\n        self,\n        device: Optional[torch.device] = torch.device(\n            \"cuda\" if torch.cuda.is_available() else \"cpu\"\n        ),\n    ) -> None:\n        self.device = device\n        self.img_transform = A.Compose(\n            [\n                A.Resize(224, 224),\n                A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),\n                ToTensorV2(),\n            ]\n        )\n        self.model = torch.load(self.WEIGHT_PATH)\n        self.model.to(self.device)\n\n    def predict(self, image: np.ndarray) -> dict:\n        image = self.img_transform(image=image)[\"image\"]\n        image = image.unsqueeze(0)\n        image = image.to(self.device)\n        with torch.no_grad():\n            prediction = self.model(image)\n        top_p, top_class = prediction.topk(1, dim=1)\n        top_class = top_class.item()\n        top_p = top_p.item()\n        class_name = self.IDX_TO_CLASS_NAME[top_class]\n        return {\n            \"prediction\": prediction,\n            \"top_p\": top_p,\n            \"top_class\": top_class,\n            \"class_name\": class_name,\n        }\n","repo_name":"kornellewy/kjn_face_id_system","sub_path":"kjn_face_id_system/id_card_classification_multi_shot/id_card_classificator.py","file_name":"id_card_classificator.py","file_ext":"py","file_size_in_byte":4993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1867460759","text":"import argparse\r\nimport cv2\r\nimport librosa\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport os\r\nimport soundfile as sf\r\nimport torch\r\nfrom DeepSpeech2.deepspeech_pytorch.config import SpectConfig\r\nfrom DeepSpeech2.deepspeech_pytorch.model import DeepSpeech\r\nfrom DeepSpeech2.deepspeech_pytorch.utils import load_decoder\r\nfrom moviepy.editor import VideoFileClip\r\nfrom scipy import signal\r\nfrom Utils.audio import parse_audio_spect\r\nfrom Utils.misc import create_directory\r\nimport time\r\n\r\nparser = argparse.ArgumentParser(description='DeepSpeech2 Feature Extraction')\r\nparser.add_argument('input', help=\"Folder containing fake and real folders, inside those folders \\\r\n                    all of the video files are stored\")\r\nparser.add_argument('output', help=\"Saves DeepSpeech2 tensors to this file_path. 
It will create a fake \\\r\n and real folder, in which the video label folder will be \\\r\n created containing the tensors\")\r\nparser.add_argument('-device', default='cuda', help=\"device for model (default cuda)\")\r\nparser.add_argument('-pretrained', \r\n default='DeepSpeech2/pretrained_weights/librispeech_pretrained_v2.pth',\r\n help='path to pretrained weights for DeepSpeech2 model')\r\n\r\ndef save_deepspeech2_features(model, input_path, save_path, video_id, \r\n window_stride = .01,\r\n window_size = .02,\r\n sample_rate = 16000,\r\n window = 'hamming'):\r\n spect = parse_audio_spect(input_path, window_stride, window_size, sample_rate, window)\r\n spect = spect.transpose()\r\n input_size = torch.IntTensor([100]).int().to('cuda') # 1 sec input (100 data points)\r\n\r\n # list_outputs = [] \r\n # every 100 data points on the spectrogram is 1 sec of audio\r\n for sec in range(len(spect) // 100):\r\n one_sec_spect = spect[sec * 100 : min((sec+1) * 100, len(spect))]\r\n one_sec_spect = torch.FloatTensor(one_sec_spect.transpose()).unsqueeze(0).unsqueeze(0).to('cuda')\r\n out, out_size, features = model(one_sec_spect, input_size)\r\n # list_outputs.append((out, out_size))\r\n torch.save(features, f'{save_path}/{video_id[:-4]}-{sec:03d}-{features.shape[0]}.pt')\r\n \r\n # verify_output(list_outputs, model)\r\n\r\n return out, out_size, features\r\n\r\ndef verify_output(list_outputs, model):\r\n decoder = 'greedy'\r\n lm_path = None # Path to an (optional) kenlm language model for use with beam search\r\n alpha = 0.8 # Language model weight\r\n beta = 1 # Language model word bonus (all words)\r\n cutoff_top_n = 40 # Cutoff number in pruning, only top cutoff_top_n characters with highest probs in \r\n # vocabulary will be used in beam search\r\n cutoff_prob = 1.0 # Cutoff probability in pruning,default 1.0, no pruning.\r\n beam_width = 10 # Beam width to use\r\n lm_workers = 1 # Number of LM processes to use\r\n\r\n decoder = load_decoder(decoder_type=decoder,\r\n labels=model.labels,\r\n lm_path=lm_path,\r\n alpha=alpha,\r\n beta=beta,\r\n cutoff_top_n=cutoff_top_n,\r\n cutoff_prob=cutoff_prob,\r\n beam_width=beam_width,\r\n lm_workers=lm_workers)\r\n \r\n for i, (out, out_size) in enumerate(list_outputs):\r\n decoded_output = decoder.decode(out, out_size)\r\n print(decoded_output[0])\r\n \r\ndef main():\r\n start = time.time()\r\n args = parser.parse_args()\r\n model = DeepSpeech.load_model(args.pretrained)\r\n model.to(args.device)\r\n\r\n window_stride = .01\r\n window_size = .02\r\n sample_rate = 16000\r\n window = 'hamming' #SpectConfig.window.value\r\n\r\n subfolders = ['real', 'fake']\r\n for subfolder in subfolders:\r\n file_list = [video_label for video_label in os.listdir(os.path.join(args.input, subfolder))]\r\n for video_id in file_list:\r\n input_path = os.path.join(args.input, subfolder, video_id)\r\n output_path = create_directory(os.path.join(args.output, subfolder, video_id))\r\n save_deepspeech2_features(model, input_path, output_path, video_id)\r\n\r\n print(time.time()-start)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"jklewis99/MultimodalDeepfakeDetection","sub_path":"deepspeech_extract.py","file_name":"deepspeech_extract.py","file_ext":"py","file_size_in_byte":4501,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"26002775599","text":"from tkinter import *\r\nimport webbrowser\r\nroot = Tk()\r\nroot.title(\"Page Open 
Bot\")\r\nroot.geometry(\"750x580\")\r\nroot.iconbitmap('Brand-Logo-Icon.ico')\r\nroot.resizable(False, False)\r\nroot.configure(bg='lemon chiffon')\r\nappLabel = Label(root, text=\"Open the pages you want !!\", font=(\"Times\", \"42\", \"bold italic\"), fg=\"red\", bg='lemon chiffon', anchor=CENTER)\r\nappLabel.pack()\r\nappLabel.place(x=10,y=5)\r\npageNo_frame = LabelFrame(root, text=\"Select No. of Pages\", background=\"lemon chiffon\", bd=4, fg='red', labelanchor=N)\r\nfrom_label = Label(pageNo_frame, text=\"From: \", background=\"lemon chiffon\")\r\nfrom_label.grid(row=0, column=0, padx=10, pady=10)\r\nfrom_entry = Entry(pageNo_frame)\r\nfrom_entry.grid(row=0, column=1, padx=10, pady=10)\r\nto_label = Label(pageNo_frame, text=\"To: \", background=\"lemon chiffon\")\r\nto_label.grid(row=0, column=2, padx=10, pady=10)\r\nto_entry = Entry(pageNo_frame)\r\nto_entry.grid(row=0, column=3, padx=10, pady=10)\r\ndef open():\r\n    from_page_no = int(from_entry.get())\r\n    to_page_no = int(to_entry.get())\r\n    for i in range(from_page_no, to_page_no):\r\n        url = \"https://www.askmattrab.com/notes/\"+str(i)\r\n        #print(url)\r\n        webbrowser.open_new(url)\r\n    last_url = \"https://www.askmattrab.com/notes/\"+str(to_page_no)\r\n    webbrowser.open_new(last_url)\r\n    #print(last_url)\r\nopen_button = Button(root, text=\"Open Pages\", fg='white', bg='red', font=(\"Helvetica\", \"11\", \"italic\"), bd='3', command=open)\r\nopen_button.pack()\r\nopen_button.place(x=250, y=250)\r\npageNo_frame.pack()\r\npageNo_frame.place(x=40, y=100)\r\nroot.mainloop()\r\n","repo_name":"Raunakkumarr/Mattrab-Notes-Open-Bot","sub_path":"pageOpenBot.py","file_name":"pageOpenBot.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9395517149","text":"import torch\nimport numpy as np\nfrom PIL import Image\nfrom nd2reader import ND2Reader\nfrom pathlib import Path, PosixPath\nimport torchvision.transforms as transforms\nimport torchvision\n\n\ndef crop_to_patches(image, top, left, patch_height, patch_width, overlap):\n    # overlap in percentage, for example: 0.8 / 0.5\n    overlap_range_width = int(patch_width * overlap)\n    overlap_range_height = int(patch_height * overlap)\n    patches_list = []\n    cur_left = left\n\n    range_x = 1 + (image.shape[1] - patch_height - (((image.shape[1] - patch_height)) % (patch_height - overlap_range_height))) // (patch_height-overlap_range_height)\n    range_y = 1 + (image.shape[2] - patch_width - (((image.shape[2] - patch_width)) % (patch_width - overlap_range_width))) // (patch_width-overlap_range_width)\n\n    for i in range(int(range_x)):\n        for j in range(int(range_y)):\n            if cur_left + overlap_range_width <= image.shape[2] and top + overlap_range_height <= image.shape[1]:\n                patches_list.append(torchvision.transforms.functional.crop(image, top, cur_left, patch_height, patch_width))\n            cur_left += (patch_width - overlap_range_width)\n        top += (patch_height - overlap_range_height)\n        cur_left = left\n\n    return patches_list\n\ndef augmentation(img,n_rotation):\n    '''\n    the function creates augmentations of the same image: flipped L-R, and rotations.\n    for each orientation (original + flipped) the function rotates by n_rotation.\n    for example: if n_rotation = 6, 6 images will be created per orientation: 0 deg, 60 deg, 120 deg, 180 deg, 240 deg, 300 deg.\n    :param img: pytorch tensor\n    :param n_rotation: int\n    :return: img_augmentations: pytorch tensor of tensors, each tensor is a new image\n    '''\n    flipped = img.fliplr()\n    orientations = [img, 
flipped]\n img_augmentations = torch.zeros((n_rotation*2, img.shape[1], img.shape[2]))\n if n_rotation==0:\n angle=0\n else:\n angle = 360/n_rotation\n for i, cur_img in enumerate(orientations):\n for j in range(n_rotation):\n cur_angle = j*angle\n rotated_img = transforms.functional.rotate(cur_img, interpolation=transforms.InterpolationMode.BILINEAR, angle= cur_angle)\n img_augmentations[(i*n_rotation) +j, :,:] = rotated_img\n\n return img_augmentations\n\ndef create_patches_for_type(images_folder_path, patch_size, overlap, crop_start, n_rotations=2):\n '''\n The function get a folder with images, divides each image to patches, and take each patch and creates augmentations.\n :param images_folder: folder path as string\n :return: patches_all: tensor of patches tensors.\n '''\n patches_all = torch.tensor([])\n orig_images = []\n images_folder = PosixPath(images_folder_path)\n top, left = crop_start\n patch_height, patch_width = patch_size\n\n for image in images_folder.iterdir():\n if image.is_file():\n image_path = image.__fspath__()\n if \"nd2\" in image.suffix:\n nd = ND2Reader(image_path)\n img_arr = nd.get_frame(0)\n tensor_img = torch.tensor(np.array([np.int16(img_arr)]))\n else:\n try:\n img = Image.open(image_path)\n except:\n print(\"error - this file is not an image: \", image_path)\n continue\n convert_tensor = transforms.ToTensor()\n tensor_img = convert_tensor(img)\n if torch.max(tensor_img) == 1:\n tensor_img = tensor_img*255\n patches = crop_to_patches(tensor_img, top, left, patch_height, patch_width, overlap) #maia's function\n for patch in patches:\n if n_rotations == 0:\n augmentations = patch\n else:\n augmentations = augmentation(patch, n_rotations) # 2 - only non-interpolation augmentation\n patches_all = torch.cat((patches_all, augmentations))\n orig_images += ([image_path] * (len(patches)*4)) # 4 - number of augmentations\n\n return patches_all, orig_images\n\ndef remove_outliers(patch, q1_percentile=0.01, q3_percentile=0.99):\n '''\n the function get a patch and remove the outliers, according to the q1, q3.\n and them normalize it between 0-255.\n '''\n q1 = np.quantile(patch, q1_percentile)\n q3 = np.quantile(patch, q3_percentile)\n\n iqr = q3 - q1\n patch[patch > q3 + 1.5 * iqr] = q3 + 1.5 * iqr\n patch[patch < q1 - 1.5 * iqr] = q1 - 1.5 * iqr\n if not (np.max(patch) - np.min(patch) == 0):\n patch = (patch - np.min(patch)) / (np.max(patch) - np.min(patch))\n patch = patch * 255\n return patch\n\ndef save_patches(patches, output_folder, q1_percentile=0.01, q3_percentile=0.99, patch_name=\"patch\"):\n '''\n save the patches to a folder as jpg images.\n :param patches:\n :param output_folder:\n :return:\n '''\n output_folder = Path(output_folder)\n if not output_folder.exists():\n output_folder.mkdir()\n for i, patch in enumerate(patches):\n patch = patch.squeeze()\n patch = patch.numpy()\n patch = remove_outliers(patch, q1_percentile, q3_percentile)\n patch = patch.astype(np.uint8)\n patch = Image.fromarray(patch)\n patch.save(output_folder / f\"{patch_name}_{i}.jpg\")\n","repo_name":"tavnah/improved_diffusion_for_SMLM","sub_path":"improved_diffusion_for_SMLM/scripts/patches.py","file_name":"patches.py","file_ext":"py","file_size_in_byte":5330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26128148361","text":"import torch.nn as nn\nimport torch\nimport torch.nn.functional as F\nimport torch.nn.init as init\nfrom BuildingBlocks import *\n\n\nclass BasicModel(nn.Module):\n def __init__(self):\n 
super().__init__()\n self.m_dropoutProb = 0.5\n self.m_dropout3d = nn.Dropout3d(p=self.m_dropoutProb)\n self.m_dropout2d = nn.Dropout2d(p=self.m_dropoutProb)\n self.m_dropout1d = nn.Dropout(p=self.m_dropoutProb)\n self.m_optimizer = None\n self.m_lossFuncList = []\n self.m_lossWeightList = []\n\n def forward(self, x):\n pass\n\n def setOptimizer(self, optimizer):\n self.m_optimizer = optimizer\n\n def appendLossFunc(self, lossFunc, weight = 1.0):\n self.m_lossFuncList.append(lossFunc)\n self.m_lossWeightList.append(weight)\n\n def getOnlyLossFunc(self):\n assert (1 == len(self.m_lossFuncList))\n return self.m_lossFuncList[0]\n\n def lossFunctionsInfo(self):\n return f'Loss Functions List: ' + f'\\t'.join(f'{type(loss).__name__} with weight of {weight}; ' for loss, weight in zip(self.m_lossFuncList, self.m_lossWeightList))\n\n def updateLossWeightList(self, weightList):\n self.m_lossWeightList = weightList\n\n def getLossWeightList(self):\n return self.m_lossWeightList\n\n def batchTrain(self, inputs, labels):\n self.m_optimizer.zero_grad()\n outputs = self.forward(inputs)\n loss = torch.tensor(0.0).to(inputs.device)\n for lossFunc, weight in zip(self.m_lossFuncList, self.m_lossWeightList):\n if weight == 0:\n continue\n loss += lossFunc(outputs,labels)*weight\n loss.backward()\n self.m_optimizer.step()\n return outputs, loss.item()\n\n def batchTrainMixup(self, inputs, labels1, labels2, lambdaInBeta):\n self.m_optimizer.zero_grad()\n outputs = self.forward(inputs)\n loss = torch.tensor(0.0).to(inputs.device)\n for lossFunc, weight in zip(self.m_lossFuncList, self.m_lossWeightList):\n if weight == 0:\n continue\n if lambdaInBeta != 0:\n loss += lossFunc(outputs,labels1)*weight*lambdaInBeta\n if 1-lambdaInBeta != 0:\n loss += lossFunc(outputs,labels2)*weight*(1-lambdaInBeta)\n loss.backward()\n self.m_optimizer.step()\n return outputs, loss.item()\n\n def batchTest(self, inputs, labels):\n outputs = self.forward(inputs)\n loss = torch.tensor(0.0).to(inputs.device)\n for lossFunc, weight in zip(self.m_lossFuncList, self.m_lossWeightList):\n if weight == 0:\n continue\n loss += lossFunc(outputs, labels) * weight\n return outputs, loss.item()\n\n def getParametersScale(self):\n sumPara = 0\n params = self.parameters()\n for param in params:\n sumPara += param.nelement()\n return f\"Network has total {sumPara:,d} parameters.\"\n\n def setDropoutProb(self, prob):\n self.m_dropoutProb = prob\n self.m_dropout2d.p = prob\n self.m_dropout3d.p = prob\n return f\"Info: network dropout rate = {self.m_dropoutProb}\"\n\n def getLR(self):\n return float(self.m_optimizer.param_groups[0]['lr'])\n\n @staticmethod\n def getConvOutputTensorSize(inputSize, filter, stride, padding):\n dim = len(inputSize)\n xSize = list(inputSize)\n for i in range(dim):\n xSize[i] = (xSize[i] + 2*padding[i]- filter[i]) // stride[i] + 1\n xSize = tuple(xSize)\n return xSize\n\n @staticmethod\n def getConvTransposeOutputTensorSize(inputSize, filter, stride, padding):\n dim = len(inputSize)\n xSize = list(inputSize)\n for i in range(dim):\n xSize[i] = (xSize[i] - 1)*stride[i] - 2* padding[i]+filter[i]\n xSize = tuple(xSize)\n return xSize\n\n @staticmethod\n def getProduct(aTuple):\n prod = 1\n for x in aTuple:\n prod *= x\n return prod\n\n @staticmethod\n def isTensorSizeLessThan(tensorSize, value):\n for x in tensorSize:\n if x < value:\n return True\n return False\n\n @staticmethod\n def addDownBBList(inputSize, Cin, Cout, nDownSamples, nInBB):\n downList = nn.ModuleList()\n outputSize = inputSize\n dim = len(inputSize)\n filter 
= (3,) * dim\n stride = (2,) * dim\n padding = (0,) * dim\n for i in range(nDownSamples):\n if 0 == i:\n downList.append(DownBB(Cin, Cout, filter1st=filter, stride=stride, nLayers=nInBB))\n else:\n downList.append(DownBB(Cout, Cout, filter1st=filter, stride=stride, nLayers=nInBB))\n\n outputSize = BasicModel.getConvOutputTensorSize(outputSize, filter, stride, padding)\n if BasicModel.isTensorSizeLessThan(outputSize, 3):\n print(f\"Warning: at the {i}th downSample with inputSize = {inputSize}, the outputSize = {outputSize} has elements less than 3.\")\n break\n\n return downList, outputSize\n\n @staticmethod\n def addUpBBList(inputSize, Cin, Cout, nUpSamples, nInBB):\n downList = nn.ModuleList()\n outputSize = inputSize\n dim = len(inputSize)\n filter = (3,) * dim\n stride = (2,) * dim\n padding = (0,) * dim\n for i in range(nUpSamples):\n if 0 == i:\n downList.append(UpBB(Cin, Cout, filter1st=filter, stride=stride, nLayers=nInBB))\n else:\n downList.append(UpBB(Cout, Cout, filter1st=filter, stride=stride, nLayers=nInBB))\n\n outputSize = BasicModel.getConvTransposeOutputTensorSize(outputSize, filter, stride, padding)\n return downList, outputSize\n\n @staticmethod\n def addDownBBListWithMoreFilters(inputSize, Cin, nDownSamples, nInBB):\n \"\"\"\n\n :param inputSize:\n :param Cin:\n :param nDownSamples:\n :param nInBB:\n :return: downList, outputSize, C at the final layer.\n \"\"\"\n downList = nn.ModuleList()\n outputSize = inputSize\n dim = len(inputSize)\n filter = (3,) * dim\n stride = (2,) * dim\n padding = (0,) * dim\n C = Cin # channels number\n for i in range(nDownSamples):\n downList.append(DownBB(C, 2*C, filter1st=filter, stride=stride, nLayers=nInBB))\n outputSize = BasicModel.getConvOutputTensorSize(outputSize, filter, stride, padding)\n C = 2*C\n if BasicModel.isTensorSizeLessThan(outputSize, 3):\n print(\n f\"Warning: at the {i}th downSample with inputSize = {inputSize}, the outputSize = {outputSize} has elements less than 3.\")\n break\n\n return downList, outputSize, C\n\n @staticmethod\n def addUpBBListWithLessFilters(inputSize, Cin, nUpSamples, nInBB):\n \"\"\"\n\n :param inputSize:\n :param Cin:\n :param nUpSamples:\n :param nInBB:\n :return: downList, outputSize, C at the final layer\n \"\"\"\n downList = nn.ModuleList()\n outputSize = inputSize\n dim = len(inputSize)\n filter = (3,) * dim\n stride = (2,) * dim\n padding = (0,) * dim\n C = Cin\n for i in range(nUpSamples):\n downList.append(UpBB(C, C//2, filter1st=filter, stride=stride, nLayers=nInBB))\n outputSize = BasicModel.getConvTransposeOutputTensorSize(outputSize, filter, stride, padding)\n C = C//2\n return downList, outputSize, C\n\n @staticmethod\n def initializeWeights(m):\n \"\"\"\n copy from https://gist.github.com/jeasinema/ed9236ce743c8efaf30fa2ff732749f5 at June 6th, 2019\n :param m: model.\n :return:\n \"\"\"\n if isinstance(m, nn.Conv1d):\n init.normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n\n elif isinstance(m, nn.Conv2d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n\n elif isinstance(m, nn.Conv3d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n\n elif isinstance(m, nn.ConvTranspose1d):\n init.normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n\n elif isinstance(m, nn.ConvTranspose2d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n\n elif isinstance(m, nn.ConvTranspose3d):\n init.xavier_normal_(m.weight.data)\n 
if m.bias is not None:\n init.normal_(m.bias.data)\n\n elif isinstance(m, nn.BatchNorm1d):\n init.normal_(m.weight.data, mean=1, std=0.02)\n init.constant_(m.bias.data, 0)\n\n elif isinstance(m, nn.BatchNorm2d):\n init.normal_(m.weight.data, mean=1, std=0.02)\n init.constant_(m.bias.data, 0)\n\n elif isinstance(m, nn.BatchNorm3d):\n init.normal_(m.weight.data, mean=1, std=0.02)\n init.constant_(m.bias.data, 0)\n\n elif isinstance(m, nn.Linear):\n init.xavier_normal_(m.weight.data)\n # init.normal_(m.bias.data)\n init.constant_(m.bias.data, 0)\n\n elif isinstance(m, nn.LSTM):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n\n elif isinstance(m, nn.LSTMCell):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n\n elif isinstance(m, nn.GRU):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n\n elif isinstance(m, nn.GRUCell):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n else:\n #print(f\"{m.__class__.__name__} does not support initialization in initializeWeights function.\")\n pass\n\n\n","repo_name":"templeblock/OvarianCancer","sub_path":"BasicModel.py","file_name":"BasicModel.py","file_ext":"py","file_size_in_byte":10441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30167543981","text":"\"\"\"Production settings and globals.\"\"\"\nfrom base import *\n\n\n########## HOST CONFIGURATION\n# See: https://docs.djangoproject.com/en/1.5/releases/1.5/#allowed-hosts-required-in-production\nMAIN_HOST = ['openbilanci.staging.deppsviluppo.org',]\n\n# Allowed hosts expansion: needed for servizi ai Comuni\nHOSTS_COMUNI = [\n'novara.comuni.deppsviluppo.org',\n'rapallo.comuni.deppsviluppo.org',\n'castiglionedellestiviere.comuni.deppsviluppo.org',\n'firenze.comuni.deppsviluppo.org',\n'terni.comuni.deppsviluppo.org'\n]\n\nALLOWED_HOSTS += MAIN_HOST + HOSTS_COMUNI\n########## END HOST CONFIGURATION\n\n########## EMAIL CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend\nEMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'\n########## END EMAIL CONFIGURATION\n\n\n########## TOOLBAR CONFIGURATION\n# See: https://github.com/django-debug-toolbar/django-debug-toolbar#installation\nINSTALLED_APPS += (\n 'debug_toolbar',\n)\n\n# See: https://github.com/django-debug-toolbar/django-debug-toolbar#installation\nINTERNAL_IPS = ('176.31.74.29',)\n\n# See: https://github.com/django-debug-toolbar/django-debug-toolbar#installation\nMIDDLEWARE_CLASSES = (\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n) + MIDDLEWARE_CLASSES\n\n# See: https://github.com/django-debug-toolbar/django-debug-toolbar#installation\nDEBUG_TOOLBAR_CONFIG = {\n 'INTERCEPT_REDIRECTS': False,\n 'SHOW_TEMPLATE_CONTEXT': True,\n}\ndef show_toolbar(request):\n print(\"IP Address for debug-toolbar: \" + request.META['REMOTE_ADDR'])\n return True\nSHOW_TOOLBAR_CALLBACK = show_toolbar\nDEBUG_TOOLBAR_PATCH_SETTINGS=False\n\n########## END TOOLBAR CONFIGURATION\n\n\nBILANCI_PATH = \"/home/open_bilanci/dati/bilanci_subset\"\nOUTPUT_FOLDER = '../scraper_project/scraper/output/'\nLISTA_COMUNI = 'listacomuni.csv'\nLISTA_COMUNI_PATH = OUTPUT_FOLDER + LISTA_COMUNI\n\nPATH_PREVENTIVI = BILANCI_PATH+\"/%s/%s/Preventivo/%s.html\"\nPATH_CONSUNTIVI = 
BILANCI_PATH+\"/%s/%s/Consuntivo/%s.html\"\n\nBILANCI_RAW_DB = 'bilanci_raw'\n\n\n","repo_name":"DeppSRL/open_bilanci","sub_path":"bilanci_project/bilanci/settings/staging.py","file_name":"staging.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"10054626399","text":"\"\"\"\n@Author:WangYuXiang\n@E-mail:Hill@3io.cc\n@CreateTime:2021/3/31 16:21\n@DependencyLibrary:None\n@MainFunction:None\n@FileDoc: \n    authentication.py\n    File description\n@ChangeHistory:\n    datetime action why\n    example:\n    2021/3/31 16:21 change 'Fix bug'\n    \n\"\"\"\nimport jwt\nfrom jwt import ExpiredSignatureError\n\nfrom sanic_rest_framework.exceptions import APIException\nfrom sanic_rest_framework.request import SRFRequest\nfrom sanic_rest_framework.status import HttpStatus\n\n\nclass BaseAuthenticate:\n    def authenticate(self, request: SRFRequest, **kwargs):\n        \"\"\"Authenticate the request and return the User object\"\"\"\n        # request.headers['']\n\n\nclass BaseTokenAuthenticate(BaseAuthenticate):\n    \"\"\"Basic token-based authentication (JWT)\"\"\"\n    token_key = 'X-Token'\n\n    async def authenticate(self, request: SRFRequest, **kwargs):\n        \"\"\"Authentication logic\"\"\"\n        token = request.headers.get(self.token_key)\n        if token is None:\n            raise APIException(message='Authorization error: request header {} does not exist'.format(self.token_key), http_status=HttpStatus.HTTP_401_UNAUTHORIZED)\n        token_secret = request.app.config.TOKEN_SECRET\n        try:\n            token_info = self.authentication_token(token, token_secret)\n        except ExpiredSignatureError:\n            raise APIException(message='Authorization has expired, please log in again', http_status=HttpStatus.HTTP_401_UNAUTHORIZED)\n        await self._authenticate(request, token_info, **kwargs)\n\n    async def _authenticate(self, request: SRFRequest, token_info: dict, **kwargs):\n        \"\"\"Main processing logic\"\"\"\n        pass\n\n    def authentication_token(self, token, token_secret):\n        \"\"\"\n        Decode the token\n        :param token: the token string\n        :param token_secret: secret key used for decoding\n        :return:\n        \"\"\"\n        token_info = jwt.decode(token, token_secret, algorithms=['HS256'])\n        return token_info\n","repo_name":"OpenHill/sanic_rest_framework","sub_path":"authentication.py","file_name":"authentication.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33569328734","text":"from airtest.core.api import *;\nimport start_border;\n\nlocate_path = \"photo/locate\";\ngoing_path = \"photo/going\";\nphoto_path = \"photo\";\n\n\n# Determine which page we are currently on\ndef loc_address():\n    # In the border room (or on the results page; the return click may not have succeeded)\n    if exists(Template(locate_path + r\"\\在边境房.png\", threshold=0.8)):\n        # Rarely the back button is clicked but nothing responds, leaving us stuck on the results page\n        if exists(Template(photo_path + r\"\\返回房间.png\")):\n            back_loc = wait(Template(photo_path + r\"\\返回房间.png\"), timeout=10, interval=1);\n            double_click(back_loc);\n    # On the border main page\n    elif exists(Template(locate_path + r\"\\在边境主页.png\", threshold=0.8)):\n        border_page_to_room();\n    # In the lobby\n    elif exists(Template(locate_path + r\"\\在大厅.png\", threshold=0.8)):\n        home_to_room();\n    # On the ranked page\n    elif exists(Template(locate_path + r\"\\在排位.png\", threshold=0.8)):\n        rank_to_room();\n    # On the map-running page (handles only entering the map 20 seconds after matching)\n    elif exists(Template(locate_path + r\"\\跑图页面.png\", threshold=0.7)):\n        start_border.hang_up();\n    # else:\n    #     keyevent(\"26\");\n    #     raise Exception(\"Not on any of the five pages, turning the screen off!\");\n\n\n# Lobby to border room\ndef home_to_room():\n    match_loc = wait(Template(locate_path + r\"\\在大厅.png\"), timeout=10, interval=1);\n    click(match_loc);\n    rank_to_room();\n\n\n# Ranked page to border room\ndef rank_to_room():\n    border_loc = wait(Template(going_path + r\"\\点击左侧边境战争.png\"), timeout=10, interval=1);\n    click(border_loc);\n    # The ranked page also has a solo race button, so sleep briefly after switching to avoid matching the ranked one\n    sleep(0.5);\n    border_page_to_room();\n\n\n# Border page to border room\ndef border_page_to_room():\n    person_speed_loc = wait(Template(going_path + r\"\\点击边境个人竞速.png\", threshold=0.8), timeout=10, interval=1);\n    click(person_speed_loc);\n    coupon_60_loc = wait(Template(going_path + r\"\\点击60点券.png\"), timeout=10, interval=1);\n    click(coupon_60_loc);\n","repo_name":"yilko/scripts","sub_path":"qspeed_hangup/locate.py","file_name":"locate.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22034651623","text":"\"\"\"\nCreated on September 15, 2019\n\n@author: Yi Wang\n\"\"\"\n\nimport copy\nimport h5py\nfrom netCDF4 import Dataset\n\n\ndef read_OMI_NO2_L3(filename, varnames=[], replace=False, verbose=False):\n    \"\"\" read OMI L3 NO2 product\n\n    Parameters\n    ----------\n    filename : str\n        OMI L3 NO2 product file.\n    varnames : list\n        A list of variable names\n    replace : logical\n        True: replace all_varnames by varnames\n        False: extend varnames to all_varnames\n    verbose : logical\n        Whether or not to output more information.\n\n    Returns\n    -------\n    out_data : dict\n        A dictionary of all variables.\n\n    \"\"\"\n\n    if verbose:\n        print(' - read_OMI_NO2_L3: reading ' + filename)\n\n    # all variable names\n    all_varnames = [\n        '/HDFEOS/GRIDS/ColumnAmountNO2/Data Fields/ColumnAmountNO2TropCloudScreened', \n    ]\n    if not replace:\n        all_varnames.extend(varnames)\n        all_varnames = list(set(all_varnames))\n    else:\n        all_varnames = varnames\n\n    # open dataset\n    infile = h5py.File(filename, 'r')\n\n    # read variables\n    out_data = dict()\n    for varname in all_varnames:\n        varn = varname.split('/')[-1]\n        out_data[varn] = infile[varname][:]\n\n    # close dataset\n    infile.close()\n\n    return out_data\n","repo_name":"ywang37/pylib","sub_path":"read_omi.py","file_name":"read_omi.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27044182286","text":"from typing import List, Tuple, Optional, Union\n\nfrom pygame.math import Vector2\n\nfrom asserts.sourse.csv_reader import CsvOpen\n\nLEVELS = (\n    \"asserts/maps/level1.csv\",\n    \"asserts/maps/level2.csv\",\n    \"asserts/maps/level3.csv\"\n)\n\nmapT = List[List[Union[int, float]]]\n\n\ndef load_csv(file_path) -> mapT:\n    with CsvOpen(file_path, \"r\") as file:\n        data = list(map(lambda e: list(map(int, e)), file))\n    return data\n\n\ndef load_level(level: int) -> Optional[Tuple[Vector2, Vector2, mapT]]:\n    str_l = f\"asserts/maps/level{level}.csv\"\n    if str_l not in LEVELS:\n        return None\n    csv = load_csv(str_l)\n    # noinspection PyTypeChecker\n    info: Tuple[int, int, int, int] = tuple(csv[0])\n    spawn = Vector2(float(info[0]), float(info[1]))\n    win = Vector2(float(info[2]), float(info[3]))\n    rest: mapT = csv[1:]\n    return spawn, win, rest\n","repo_name":"gresm/pygame_february","sub_path":"asserts/maps/maps_manager.py","file_name":"maps_manager.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"74220020908","text":"import pymongo\nimport argparse\nimport zarr\nimport json\nimport numpy as np\nimport os\nimport logging\nfrom CNNectome.utils.label import Label\nfrom CNNectome.utils import config_loader\nfrom CNNectome.utils.cosem_db import MongoCosemDB\nfrom scipy.ndimage.morphology import (\n    generate_binary_structure,\n    binary_dilation,\n    binary_erosion,\n    
distance_transform_edt,\n)\nfrom typing import Any, Dict, List, Optional, Tuple\n\n\ndef distance(labelfield: np.ndarray, **kwargs: Any) -> np.ndarray:\n    \"\"\"\n    Compute signed distance transform of binary labelfield.\n\n    Args:\n        labelfield: Array of binary labels.\n        **kwargs: Additional keyword arguments passed on to `scipy.ndimage.morphology.distance_transform_edt`\n\n    Returns:\n        Signed distance transform\n    \"\"\"\n    inner_distance = distance_transform_edt(\n        binary_erosion(\n            labelfield,\n            border_value=1,\n            structure=generate_binary_structure(labelfield.ndim, labelfield.ndim),\n        ),\n        **kwargs\n    )\n    outer_distance = distance_transform_edt(np.logical_not(labelfield), **kwargs)\n    result = inner_distance - outer_distance\n    return result\n\n\ndef count_with_add(labelfield: np.ndarray, labelid: int, add_constant: int) -> int:\n    \"\"\"\n    Count effective frequency of label that is computationally expanded.\n\n    Args:\n        labelfield: Array of labels.\n        labelid: Id of label to count frequency of.\n        add_constant: Constant to add to distances.\n\n    Returns:\n        Effective frequency of label in the array.\n    \"\"\"\n    binary_labelfield = labelfield == labelid\n    distances = distance(binary_labelfield, sampling=(2, 2, 2)) + add_constant\n    return np.sum(distances > 0)\n\n\ndef one_crop(\n    crop: Dict[str, Any], labels: List[Label], gt_version: str = \"v0003\"\n) -> Tuple[Dict[int, int], Dict[int, int]]:\n    \"\"\"\n    Calculate the distribution of `labels` for one particular crop.\n\n    Args:\n        crop: Instance of an entry in the crop database.\n        labels: List of labels to compute distribution for.\n        gt_version: Version of groundtruth annotations, e.g. \"v0003\"\n\n    Returns:\n        Dictionaries of number of positive and negative annotations per label in this `crop`.\n    \"\"\"\n    data_dir = config_loader.get_config()[\"organelles\"][\"data_path\"]\n    n5file = zarr.open(str(os.path.join(data_dir, crop[\"parent\"])), mode=\"r\")\n    blueprint_label_ds = (\n        \"volumes/groundtruth/{version:}/crop{cropno:}/labels/{{label:}}\"\n    )\n    blueprint_labelmask_ds = (\n        \"volumes/groundtruth/{version:}/crop{cropno:}/masks/{{label:}}\"\n    )\n    labelmask_ds = blueprint_labelmask_ds.format(\n        version=gt_version.lstrip(\"v\"), cropno=crop[\"number\"]\n    )\n    label_ds = blueprint_label_ds.format(\n        version=gt_version.lstrip(\"v\"), cropno=crop[\"number\"]\n    )\n    pos = dict()\n    neg = dict()\n    for l in labels:\n        pos[l.labelid[0]] = 0\n        neg[l.labelid[0]] = 0\n\n    labelfield = np.array(n5file[label_ds.format(label=\"all\")])\n    hist, be = np.histogram(\n        labelfield, bins=sorted([l.labelid[0] for l in labels] + [100])\n    )\n    counts = dict()\n    for b, h in zip(be, hist):\n        counts[b] = h\n\n    present_annotated = [ll[0] for ll in crop[\"labels\"][\"present_annotated\"]]\n    not_present_annotated = [ll[0] for ll in crop[\"labels\"][\"absent_annotated\"]]\n\n    for ll in present_annotated:\n        if ll == 34:\n            logging.debug(ll)\n        try:\n            label = [lli for lli in labels if lli.labelid[0] == ll][0]\n        except IndexError:\n            continue\n        mask_ds = labelmask_ds.format(label=label.labelname)\n        if mask_ds in n5file:\n            maskfield = np.array(n5file[mask_ds])\n            size = np.sum(maskfield)\n        else:\n            size = (\n                crop[\"dimensions\"][\"x\"]\n                * crop[\"dimensions\"][\"y\"]\n                * crop[\"dimensions\"][\"z\"]\n                * 8\n            )\n        if label.separate_labelset:\n            sep_labelfield = np.array(n5file[label_ds.format(label=label.labelname)])\n            hist, be = np.histogram(\n                sep_labelfield, bins=sorted([l.labelid[0] for l in labels] + [100])\n            )\n            counts_separate = dict()\n            for b, h in zip(be, hist):\n                counts_separate[b] = h\n            if label.add_constant is not None and label.add_constant > 0:\n                c = count_with_add(sep_labelfield, ll, label.add_constant)\n            else:\n                c = counts_separate[ll]\n            pos[ll] += c\n            neg[ll] += size - c\n\n        else:\n            if label.add_constant is not None and label.add_constant > 0:\n                c = count_with_add(labelfield, ll, label.add_constant)\n            else:\n                c = counts[ll]\n            pos[ll] += c\n            neg[ll] += size - c\n    for ll in not_present_annotated:\n        if ll == 34:\n            logging.debug(ll)\n        try:\n            label = [lli for lli in labels if lli.labelid[0] == ll][0]\n        except IndexError:\n            continue\n        size = (\n            crop[\"dimensions\"][\"x\"]\n            * crop[\"dimensions\"][\"y\"]\n            * crop[\"dimensions\"][\"z\"]\n            * 8\n        )\n        neg[label.labelid[0]] += size\n    return pos, neg\n\n\ndef label_dist(\n    labels: List[Label],\n    completion_min: int = 6,\n    dataset: Optional[str] = None,\n    gt_version: str = \"v0003\",\n    save: Optional[str] = None,\n) -> Dict[str, Dict[int, int]]:\n    \"\"\"\n    Compute label distribution.\n\n    Args:\n        labels: List of labels to compute distribution for.\n        completion_min: Minimal completion status for a crop from the database to be included in the distribution.\n        dataset: Dataset for which to calculate label distribution. If None calculate across all datasets.\n        gt_version: Version of groundtruth for which to accumulate distribution.\n        save: File to which to save distributions as json. If None, results won't be saved.\n\n    Returns:\n        Dictionary with distributions per label with counts for \"positives\", \"negatives\" and the sum of both (\"sums\").\n    \"\"\"\n    db = MongoCosemDB(gt_version=gt_version)\n    collection = db.access(\"crops\", db.gt_version)\n    db_filter = {\"completion\": {\"$gte\": completion_min}}\n    if dataset is not None:\n        db_filter[\"dataset_id\"] = dataset\n    skip = {\n        \"_id\": 0,\n        \"number\": 1,\n        \"labels\": 1,\n        \"parent\": 1,\n        \"dimensions\": 1,\n        \"dataset_id\": 1,\n    }\n    positives = dict()\n    negatives = dict()\n    for ll in labels:\n        positives[int(ll.labelid[0])] = 0\n        negatives[int(ll.labelid[0])] = 0\n    for crop in collection.find(db_filter, skip):\n        pos, neg = one_crop(crop, labels, db.gt_version)\n        for ll, c in pos.items():\n            positives[ll] += int(c)\n        for ll, c in neg.items():\n            negatives[ll] += int(c)\n\n    sums = dict()\n    for ll in positives.keys():\n        sums[ll] = negatives[ll] + positives[ll]\n    stats = dict()\n    stats[\"positives\"] = positives\n    stats[\"negatives\"] = negatives\n    stats[\"sums\"] = sums\n\n    if save is not None:\n        if not save.endswith(\".json\"):\n            save += \".json\"\n        with open(save, \"w\") as f:\n            json.dump(stats, f)\n\n    return stats\n\n\ndef main() -> None:\n    parser = argparse.ArgumentParser(\"Calculate the distribution of labels.\")\n    parser.add_argument(\n        \"--dataset\",\n        type=str,\n        help=(\n            \"Dataset id for which to calculate label distribution. 
If None \"\n \"calculate across all datasets.\"\n ),\n )\n parser.add_argument(\n \"--gt_version\", type=str, default=\"v0003\", help=\"Version of groundtruth.\"\n )\n parser.add_argument(\n \"--save\", type=str, help=\"File to save results to as json\", default=\".\"\n )\n args = parser.parse_args()\n\n labels = list()\n labels.append(Label(\"ecs\", 1))\n labels.append(Label(\"plasma_membrane\", 2))\n labels.append(Label(\"mito_lumen\", 4))\n labels.append(Label(\"mito_membrane\", 3))\n labels.append(\n Label(\n \"mito_DNA\",\n 5,\n )\n )\n labels.append(Label(\"golgi_lumen\", 7))\n labels.append(Label(\"golgi_membrane\", 6))\n labels.append(Label(\"vesicle_lumen\", 9))\n labels.append(Label(\"vesicle_membrane\", 8))\n labels.append(\n Label(\n \"MVB_lumen\",\n 11,\n )\n )\n labels.append(Label(\"MVB_membrane\", 10))\n labels.append(Label(\"lysosome_lumen\", 13))\n labels.append(Label(\"lysosome_membrane\", 12))\n labels.append(Label(\"LD_lumen\", 15))\n labels.append(Label(\"LD_membrane\", 14))\n labels.append(Label(\"er_lumen\", 17))\n labels.append(Label(\"er_membrane\", 16))\n labels.append(Label(\"ERES_lumen\", 19))\n labels.append(Label(\"ERES_membrane\", 18))\n labels.append(Label(\"nucleolus\", 29, separate_labelset=True))\n labels.append(Label(\"nucleoplasm\", 28))\n labels.append(Label(\"NE_lumen\", 21))\n labels.append(Label(\"NE_membrane\", 20))\n labels.append(Label(\"nuclear_pore_in\", 23))\n labels.append(Label(\"nuclear_pore_out\", 22))\n labels.append(Label(\"nucleus_generic\", 22))\n labels.append(Label(\"HChrom\", 24))\n labels.append(Label(\"NHChrom\", 25))\n labels.append(Label(\"EChrom\", 26))\n labels.append(Label(\"NEChrom\", 27))\n labels.append(Label(\"microtubules_in\", 36))\n labels.append(Label(\"microtubules_out\", 30))\n labels.append(Label(\"centrosome\", 31, add_constant=2, separate_labelset=True))\n labels.append(Label(\"distal_app\", 32))\n labels.append(Label(\"subdistal_app\", 33))\n labels.append(Label(\"ribosomes\", 34, add_constant=8, separate_labelset=True))\n labels.append(Label(\"nucleus_generic\", 37))\n if args.dataset == \"None\":\n dataset = None\n else:\n dataset = args.dataset\n label_dist(labels, dataset=dataset, gt_version=args.gt_version, save=args.save)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"saalfeldlab/CNNectome","sub_path":"CNNectome/utils/compute_label_distribution.py","file_name":"compute_label_distribution.py","file_ext":"py","file_size_in_byte":9934,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"25154018663","text":"\n# coding: utf-8\n\n# #### Modules\n\n# In[9]:\n\n\nimport numpy as np\nimport pandas as pd\nimport random\nimport os\nimport sys\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nrandom.seed(1000)\nnp.random.seed(100)\nfrom pathlib import Path\n\n\n# ### Variable declaration\n\n# In[15]:\n\n\ndataset_file_name = \"input.xlsx\"\ncandid_indexes = []\nfileName = os.path.abspath(dataset_file_name)\n\n\n# ### Classifier\n\n# In[16]:\n\n\ndef trainModel(train_file,candidate_indexes,size=0.25):\n data_input_interim = pd.read_excel(train_file)\n data_input = data_input_interim.drop(candidate_indexes,axis=0)\n Y = data_input[\"Outcome\"]\n data_input.drop(\"Outcome\",axis=1,inplace=True)\n feature_cols = data_input.columns\n X = data_input[feature_cols]\n X_train, X_test, Y_train, Y_test = train_test_split(\n X, Y, test_size=size, random_state=0)\n\n clf = 
RandomForestClassifier(max_depth = 20, \n min_samples_split=2, \n n_estimators = 100, \n random_state = 1)\n clf = clf.fit(X_train, Y_train)\n return clf\n\n\n# In[17]:\n\n\nclf_model = trainModel(fileName,candid_indexes,size=0.25)\n\n\n# In[18]:\n\n\ndef pred(test_df):\n res = clf_model.predict_proba(test_df)\n return res\n\n","repo_name":"ppouyaa/desirable-properties-master","sub_path":"old_files/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35255135442","text":"# import cv2 as cv\n# import numpy as np\n#\n# img = cv.imread('lena.jpg')\n# lr1 = cv.pyrDown(img) #low resolution image\n# lr2 = cv.pyrDown(lr1)\n# hr2 = cv.pyrUp(lr2)\n# hr1 = cv.pyrUp(hr2)\n#\n# cv.imshow('OG', img)\n# cv.imshow('pyrdown 1', lr1)\n# cv.imshow('pyrdown 2', lr2)\n# cv.imshow('pyrup 1', hr1)\n# cv.imshow('pyrup 2', hr2)\n# cv.waitKey(0)\n# cv.destroyAllWindows()\n\n# import cv2 as cv\n# import numpy as np\n#\n# img = cv.imread('lena.jpg')\n# layer = img.copy()\n#\n# gp = [layer] # gaussian pyramid (list of images)\n#\n# for i in range(6):\n# layer = cv.pyrDown(layer)\n# gp.append(layer)\n# cv.imshow(str(i), layer)\n#\n# cv.imshow('OG', img)\n# cv.waitKey(0)\n# cv.destroyAllWindows()\n\nimport cv2 as cv\nimport numpy as np\n\nimg = cv.imread('lena.jpg')\nlayer = img.copy()\n\ngp = [layer] #gaussian pyramid\n\nfor i in range(6):\n layer = cv.pyrDown(layer)\n gp.append(layer)\n #cv.imshow(str(i), layer)\n\nlayer = gp[5]\ncv.imshow('upper level', layer)\nlp = [layer] #laplacian pyramid\n\nfor i in range(5,0,-1):\n gaussian_extended = cv.pyrUp(gp[i])\n laplacian = cv.subtract(gp[i-1], gaussian_extended) # the result is like edge detection\n cv.imshow(str(i), laplacian)\n\ncv.imshow('OG', img)\ncv.waitKey(0)\ncv.destroyAllWindows()","repo_name":"Thesirloc/OpenCV-learn","sub_path":"pyramid.py","file_name":"pyramid.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43500456257","text":"#!/usr/bin/env python3\n\ntry:\n integer_types = (int, long)\n range = xrange\nexcept NameError: # Python 3\n integer_types = (int,)\n\nfrom graphtheory.coloring.edgecolorcs import ConnectedSequentialEdgeColoring\n\n\nclass NTLEdgeColoring:\n \"\"\"Find the NTL (Nishizeki, Terada, Leven) edge coloring.\n \n The algorithm is using Delta or Delta+1 colors.\n Based on Java code from\n https://github.com/martakuzak/GIS\n \n Attributes\n ----------\n graph : input undirected graph or multigraph\n color : dict with edges (values are colors)\n m : number (the number od edges)\n missing : dict with nodes (values are sets of missing colors)\n \n Notes\n -----\n Colors are 0, 1, 2, ...\n edge.source < edge.target for any edge in color.\n \"\"\"\n\n def __init__(self, graph):\n \"\"\"The algorithm initialization.\"\"\"\n if graph.is_directed():\n raise ValueError(\"the graph is directed\")\n self.graph = graph\n self.color = dict()\n self.m = 0 # graph.e() is slow\n for edge in self.graph.iteredges():\n if edge.source == edge.target:\n raise ValueError(\"a loop detected\")\n else:\n self.color[edge] = None # edge.source < edge.target\n self.m += 1\n if len(self.color) < self.m:\n raise ValueError(\"edges are not unique\")\n # dict with missing colors for nodes.\n self.missing = None\n\n def run(self, source=None):\n \"\"\"Executable pseudocode.\"\"\"\n Delta = max(self.graph.degree(node) for node in 
self.graph.iternodes())\n if Delta <= 2:\n # Greedy coloring suffies.\n algorithm = ConnectedSequentialEdgeColoring(self.graph)\n algorithm.run()\n self.color = algorithm.color\n else:\n # Ustal liczbe wykorzystywanych kolorow.\n k = Delta + 1 # almost optimal (simple graphs!)\n self.missing = dict((node, set(range(k)))\n for node in self.graph.iternodes())\n for edge in self.graph.iteredges():\n # Sprawdz wspolny kolor brakujacy.\n # To mozna chyba zrobic bardziej wydajnie.\n both = self.missing[edge.source] & self.missing[edge.target]\n if len(both) == 0:\n self._recolor(edge)\n else:\n c = min(both) # choose min color available\n self._add_color(edge, c)\n\n def _add_color(self, edge, c):\n \"\"\"Add color.\"\"\"\n if edge.source > edge.target:\n edge = ~edge\n self.color[edge] = c\n self.missing[edge.source].remove(c)\n self.missing[edge.target].remove(c)\n\n def _del_color(self, edge, c):\n \"\"\"Delete color.\"\"\"\n if edge.source > edge.target:\n edge = ~edge\n self.color[edge] = None\n self.missing[edge.source].add(c)\n self.missing[edge.target].add(c)\n\n def _get_color(self, edge):\n \"\"\"Get color.\"\"\"\n if edge.source > edge.target:\n edge = ~edge\n return self.color[edge]\n\n def show_colors(self):\n \"\"\"Show edge coloring (undirected graphs).\"\"\"\n L = []\n for source in self.graph.iternodes():\n L.append(\"{} : \".format(source))\n for edge in self.graph.iteroutedges(source):\n # It should work for multigraphs.\n c = self._get_color(edge)\n L.append(\"{}({}) \".format(edge.target, c))\n L.append(\"\\n\")\n print(\"\".join(L))\n\n def _recolor(self, edge):\n \"\"\"Swap edge colors.\"\"\"\n # Przygotowanie kolorow brakujacych m(*).\n mis = dict((node, min(self.missing[node]))\n for node in self.graph.iternodes())\n # 1. Tworzymy wachlarz dla krawedzi edge.\n # 2. Wachlarz rozpoczyna sie od wierzcholka w_0 (krawedz edge).\n fan = [edge] # tu beda cale krawedzie wychodzace z edge.source\n # 3. Zbior do szybkiego sprawdzania, czy wierzcholek nalezy do wachlarza.\n fan_set = set([edge.target]) # zbior koncow krawedzi\n # 5. alpha to kolor brakujacy dla edge.source\n alpha = mis[edge.source]\n tmp_v = edge.target # zmienna do chodzenia po koncach krawedzi wachlarza\n finished = False\n # 7. W petli szukamy kolejnych krawedzi wachlarza.\n while not finished:\n finished = True\n for edge1 in self.graph.iteroutedges(edge.source):\n # Kolor krawedzi ma byc kolorem brakujacym poprzedniego wierzcholka.\n c = self._get_color(edge1)\n if c == mis[tmp_v] and edge1.target not in fan_set:\n # 12. Dodajemy krawedz do wachlarza.\n tmp_v = edge1.target\n fan.append(edge1)\n fan_set.add(edge1.target)\n finished = False\n break\n # 14. Wachlarz zostal skonstruowany.\n # tmp_v oznacza teraz ostatni wierzcholek wachlarza w_s.\n # 15. Definiujemy kolor beta jako kolor brakujacy wierzcholka w_s.\n beta = mis[tmp_v]\n # 16. Jezeli kolor brakujacy w_s jest rowniez kolorem brakujacym\n # edge.source, to mozemy przesunac wachlarz, a krawedz fan[-1]\n # pokolorowac kolorem beta.\n if beta in self.missing[edge.source]:\n #print \"PRZYPADEK 1\"\n # 17. Przesuwamy kolory w wachlarzu.\n for i in range(len(fan)-1):\n edge1 = fan[i]\n edge2 = fan[i+1]\n c = mis[edge1.target] # to chcemy dac edge1\n self._del_color(edge2, c)\n self._add_color(edge1, c)\n # 25. Kolor beta dajemy ostatniej krawedzi wachlarza.\n edge1 = fan[-1]\n self._add_color(edge1, beta)\n else:\n #print \"PRZYPADEK 2\" # beta not in self.missing[edge.source]\n # 29. 
Build a path starting at w_s, consisting of edges\n            # with alternating colors alpha and beta.\n            path = [] # whole edges are kept here\n            path_set = set([tmp_v]) # w_s, to speed up lookups\n            tmp2_v = tmp_v # walks over the path vertices\n            finished = False\n            # 34. The parity variable controls whether the next edge\n            # should be colored with alpha or with beta.\n            parity = 0\n            # 35. In a loop, look for the next edge of the path.\n            while not finished:\n                finished = True\n                if parity % 2 == 0: # color alpha\n                    for edge1 in self.graph.iteroutedges(tmp2_v):\n                        # The edge color has to be alpha.\n                        c = self._get_color(edge1)\n                        if c == alpha and edge1.target not in path_set:\n                            tmp2_v = edge1.target\n                            path.append(edge1)\n                            path_set.add(edge1.target)\n                            finished = False\n                            break\n                else: # parity % 2 == 1, color beta\n                    for edge1 in self.graph.iteroutedges(tmp2_v):\n                        # The edge color has to be beta.\n                        c = self._get_color(edge1)\n                        if c == beta and edge1.target not in path_set:\n                            tmp2_v = edge1.target\n                            path.append(edge1)\n                            path_set.add(edge1.target)\n                            finished = False\n                            break\n                # 44. Before moving on to find the next path vertex,\n                # flip parity.\n                parity += 1\n            # 45. The path has been constructed. It may be empty.\n            # Now check whether it reaches edge.source\n            # or the boundary of the fan.\n            if len(path) == 0:\n                # Shift the colors along the fan.\n                for i in range(len(fan)-1):\n                    edge1 = fan[i]\n                    edge2 = fan[i+1]\n                    c = mis[edge1.target] # the color we want to give edge1\n                    self._del_color(edge2, c)\n                    self._add_color(edge1, c)\n                # Give the color alpha to the last edge of the fan.\n                edge1 = fan[-1]\n                self._add_color(edge1, alpha)\n            elif path[-1].target == edge.source:\n                # The path reaches edge.source with the color beta.\n                # Swap the colors on the path.\n                # Remove the colors first and add them afterwards, so that\n                # updating the colors in 'missing' does not break.\n                # First remove the colors (alpha first), except for the last edge.\n                for i in range(len(path)-1): # without the last edge\n                    c = alpha if (i % 2 == 0) else beta\n                    self._del_color(path[i], c)\n                # The edge path[-1] belongs to the fan and still has the color beta.\n                # Shift the colors along the fan, but not all the way.\n                for i in range(len(fan)-1):\n                    edge1 = fan[i]\n                    edge2 = fan[i+1]\n                    c = mis[edge1.target] # the color we want to give edge1\n                    self._del_color(edge2, c)\n                    self._add_color(edge1, c)\n                    if c == beta:\n                        break\n                # Now one edge of the fan, shared with the path,\n                # has no color.\n                # Add the swapped colors on the path (beta first).\n                for i, edge1 in enumerate(path): # the whole path\n                    c = beta if (i % 2 == 0) else alpha\n                    self._add_color(edge1, c)\n                # From here on path[-1].target != edge.source\n            elif path[-1].target in fan_set and (len(path) % 2 == 1):\n                # The path has odd length and reaches a vertex belonging\n                # to the fan via an edge of the color alpha.\n                # Remove the colors first and add them afterwards, so that\n                # updating the colors in 'missing' does not break.\n                # First remove the colors (alpha first).\n                for i, edge1 in enumerate(path): # the whole path\n                    c = alpha if (i % 2 == 0) else beta\n                    self._del_color(edge1, c)\n                # We may happen to hit the first edge of the fan.\n                if path[-1].target == edge.target:\n                    # Do not shift the fan.\n                    # Append the fan edge to the path for convenience.\n                    path.append(~edge) # reversed direction!\n                    # Now the path has an even number of edges.\n                else:\n                    # Shift the colors along the fan, but not all the way.\n                    for i in range(len(fan)-1):\n                        edge1 = fan[i]\n                        edge2 = fan[i+1]\n                        c = mis[edge1.target] # the color we want to give 
edge1\n                        self._del_color(edge2, c)\n                        self._add_color(edge1, c)\n                        if edge2.target == path[-1].target:\n                            # Append the fan edge to the path so that it is\n                            # easier to give it a color.\n                            path.append(~edge2) # reversed direction!\n                            # Now the path has an even number of edges.\n                            break\n                # Add the swapped colors on the path\n                # (beta first, alpha last).\n                for i, edge1 in enumerate(path):\n                    c = beta if (i % 2 == 0) else alpha\n                    self._add_color(edge1, c)\n            else:\n                # The path may end with the color alpha or beta.\n                # Shift the colors along the fan.\n                for i in range(len(fan)-1):\n                    edge1 = fan[i]\n                    edge2 = fan[i+1]\n                    c = mis[edge1.target] # the color we want to give edge1\n                    self._del_color(edge2, c)\n                    self._add_color(edge1, c)\n                # The last edge of the fan now has no color.\n                # Swap the colors on the path.\n                # Remove the colors first and add them afterwards, so that\n                # updating the colors in 'missing' does not break.\n                # First remove the colors (alpha first).\n                for i, edge1 in enumerate(path): # the whole path\n                    c = alpha if (i % 2 == 0) else beta\n                    self._del_color(edge1, c)\n                # Now add the swapped colors (beta first).\n                for i, edge1 in enumerate(path):\n                    c = beta if (i % 2 == 0) else alpha\n                    self._add_color(edge1, c)\n                # The path has been swapped.\n                # Give the color alpha to the last edge of the fan.\n                edge1 = fan[-1]\n                self._add_color(edge1, alpha)\n\n# EOF\n","repo_name":"ufkapano/graphtheory","sub_path":"graphtheory/coloring/edgecolorntl.py","file_name":"edgecolorntl.py","file_ext":"py","file_size_in_byte":13088,"program_lang":"python","lang":"pl","doc_type":"code","stars":42,"dataset":"github-code","pt":"37"}
{"seq_id":"3008744462","text":"def potenciaRecursiva(k, n):\n    if n == 0:\n        return 1\n    return k * potenciaRecursiva(k, n-1)\n\n\n\nbase = int(input(\"Base: \"))\nexpoente = int(input(\"Exponent: \"))\nprint(\"{}^{} = {}\".format(base, expoente, potenciaRecursiva(base, expoente)))\n\n\n# Explanation\n\n'''\n\npower of 2 raised to 3\n\nthe first return covers the case we already know, since\nif the exponent equals 0 we have: K^0 = 1\n\nIf n is not zero, the calls unwind as follows:\n\n1st: n = 3\n\n2 * potenciaRecursiva(2, 2) | NOTE: potenciaRecursiva(2, 2) = 4\n2 * 4 = 8\n\n2nd: n = 2\n\n2 * potenciaRecursiva(2, 1) | NOTE: potenciaRecursiva(2, 1) = 2\n2 * 2 = 4\n\n\n\n\n \n'''\n","repo_name":"everson-ever/Logica-de-programacao-com-Python","sub_path":"Recursividade/potenciaRecursiva.py","file_name":"potenciaRecursiva.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
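The walkthrough above unrolls the O(n) recursion one call at a time. For contrast, here is a minimal binary-exponentiation sketch (a standard O(log n) alternative, added for illustration and not part of the original file) that computes the same integer powers:

def potencia_rapida(k, n):
    # Square the base and halve the exponent at each step.
    resultado = 1
    while n > 0:
        if n % 2 == 1:  # odd exponent: fold one factor of k into the result
            resultado *= k
        k *= k          # square the base
        n //= 2         # halve the exponent
    return resultado

assert potencia_rapida(2, 3) == 8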
{"seq_id":"18062603159","text":"from nltk.corpus import wordnet\nimport re\nimport nltk\nfrom textblob import TextBlob\nimport dateparser\n\ndef find_pronoun(sent):\n    \"\"\"Given a sentence, find a preferred pronoun to respond with. Returns None if no candidate\n    pronoun is found in the input\"\"\"\n    pronoun = None\n\n    for word, part_of_speech in sent.pos_tags:\n        # Disambiguate pronouns\n        if part_of_speech == 'PRP' and word.lower() == 'you':\n            pronoun = 'I'\n        elif part_of_speech == 'PRP' and word == 'I':\n            # If the user mentioned themselves, then they will definitely be the pronoun\n            pronoun = 'You'\n    return pronoun\n\ndef find_verb(sent):\n    verb = []\n    pos = []\n    for word, part_of_speech in sent.pos_tags:\n        if part_of_speech.startswith('VB'):  # This is a verb\n            verb.append(word)\n            pos.append(part_of_speech)\n    return verb, pos\n\n\ndef find_noun(sent):\n    noun = []\n    pos = []\n    for word, part_of_speech in sent.pos_tags:\n        if part_of_speech.startswith('NN'):  # This is a noun\n            noun.append(word)\n            pos.append(part_of_speech)\n    return noun, pos\n\ndef find_adjective(sent):\n    adj = []\n    pos = []\n    for word, part_of_speech in sent.pos_tags:\n        if part_of_speech.startswith('JJ'):  # This is an adjective\n            adj.append(word)\n            pos.append(part_of_speech)\n    return adj, pos\n\nsentence = \"september 6th\"\nparsed = TextBlob(sentence)\nprint(parsed.pos_tags)\nprint(\" \")\n\n'''\nevent = wordnet.synset('event.n.01').lemmas()\nprint([str(lemma.name()) for lemma in event])\n\n\nprint(' ')\n\nschedule = wordnet.synset('schedule.n.01').lemmas()\nprint([str(lemma.name()) for lemma in schedule])\n\nsentence = \"schedule an event\"\n\ndef findWholeWord(w):\n    return re.compile(r'\\\\b({0})\\\\b'.format(w), flags=re.IGNORECASE).search\n\nprint(findWholeWord('event')(sentence))\nprint(findWholeWord('dule')(sentence))\n\nscheduleSnip = 'study session 1pm to 3pm'\nprint(find_noun(TextBlob(scheduleSnip)))\nfor noun in find_noun(TextBlob(scheduleSnip))[0]:\n\tprint(noun)\n'''\nprint(dateparser.parse(\"next thursday\"))\n\nordinalString = \"this is the 43rd time and 2nd 3rd\"\n'''\nreg = re.compile(r'\\\\d+')\nfor i in reg.finditer(ordinalString):\n\tfindIterArr = reg.finditer(ordinalString)\n\tprint(ordinalString[findIterArr.start():findIterArr.start() + 4])\n\tordinalString = ordinalString.replace(ordinalString[findIterArr.start():findIterArr.start() + 4],\"\")\n\n\n\nprint(ordinalString)\nregex = re.findall(r'\\\\d+', ordinalString)\nprint(regex)\n'''\nmonth = ['january','february','march','april','may','june','july','august','september','october','november','december']\nmonthStr = \"This is in january but also in april\"\n\nfor i in month:\n\tprint(i in monthStr)","repo_name":"Wxia33/Intelligent-Agenda-Manager","sub_path":"nlp/synonym.py","file_name":"synonym.py","file_ext":"py","file_size_in_byte":2707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
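The three find_* helpers above differ only in the Penn Treebank tag prefix they filter on ('VB', 'NN', 'JJ'). A single generic helper could replace all of them; this is an illustrative refactor (the name find_by_pos is made up, and it requires TextBlob plus its NLTK corpora):

from textblob import TextBlob

def find_by_pos(sent, prefix):
    # Collect all words whose POS tag starts with the given prefix.
    words, tags = [], []
    for word, part_of_speech in sent.pos_tags:
        if part_of_speech.startswith(prefix):
            words.append(word)
            tags.append(part_of_speech)
    return words, tags

nouns, _ = find_by_pos(TextBlob('study session 1pm to 3pm'), 'NN')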
{"seq_id":"9745077334","text":"'''Various tools for getting (sometimes astrophysically relevant) colors for plotting.'''\nimport colormath.color_objects\nimport colormath.color_conversions\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport matplotlib.colors as co\n\ndef name2color(name):\n    \"\"\"Return the 3-element RGB array of a given color name.\"\"\"\n    if '#' in name:\n        h = name\n    else:\n        h = co.cnames[name].lower()\n    return co.hex2color(h)\n\n\ndef nm2rgb(inputnm, intensity=1.0):\n\t'''Convert a wavelength (or uniform range of wavelengths) into RGB colors usable by Python.'''\n\tif np.min(inputnm) <= 350.0 or np.max(inputnm) >= 800.0:\n\t\treturn 0,0,0\n\n\t# create an SED, with 10 nm increments\n\twavelengths = np.arange(340.0, 840.0, 10.0)\n\tintensities = np.zeros_like(wavelengths)\n\n\t# add monochromatic light, if the input wavelength has only one value\n\tnm = np.round(np.array(inputnm)/10.0)*10.0\n\twhich = (wavelengths >= np.min(nm)) & (wavelengths <= np.max(nm))\n\n\t# wtf are the units of intensity to feed into SpectralColor?\n\tintensities[which] = 5.0/np.sum(which)*intensity\n\tspectral = colormath.color_objects.SpectralColor(*intensities)\n\trgb = colormath.color_conversions.convert_color(spectral, colormath.color_objects.sRGBColor)\n\treturn rgb.clamped_rgb_r, rgb.clamped_rgb_g, rgb.clamped_rgb_b\n\ndef monochromaticdemo():\n\t'''Test of nm2rgb, for a single wavelength.'''\n\tn = 1000\n\tx = np.linspace(340, 1000, n)\n\tcolors = [nm2rgb(c) for c in x]\n\tplt.ion()\n\n\tfi, ax = plt.subplots(2,1, sharex=True)\n\tax[0].plot(x, [c[0] for c in colors], color='red')\n\tax[0].plot(x, [c[1] for c in colors], color='green')\n\tax[0].plot(x, [c[2] for c in colors], color='blue')\n\tax[1].scatter(x, np.random.normal(0,1,n), color=colors, s=100)\n\tax[1].set_xlim(min(x), max(x))\n\tax[1].set_xlabel('Wavelength (nm)')\n\ndef broadbanddemo(width=50):\n\t'''Test of nm2rgb, for a range of wavelengths.'''\n\n\tn = 1000\n\tx = np.linspace(340, 1000, n)\n\tcolors = [nm2rgb([c-width, c+width]) for c in x]\n\tplt.ion()\n\n\tplt.cla()\n\tfi, ax = plt.subplots(2,1, sharex=True)\n\tax[0].plot(x, [c[0] for c in colors], color='red')\n\tax[0].plot(x, [c[1] for c in colors], color='green')\n\tax[0].plot(x, [c[2] for c in colors], color='blue')\n\tax[1].scatter(x, np.random.normal(0,1,n), color=colors, s=100)\n\tax[1].set_xlim(min(x), max(x))\n","repo_name":"zkbt/zachopy","sub_path":"color.py","file_name":"color.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
{"seq_id":"21555288301","text":"# coding: utf-8\nfrom pdz.enum.models import Service\nfrom django.contrib.auth.models import Group\nfrom pdz.workers.models import Operator\nfrom django.core.management.base import BaseCommand\nfrom django.db import transaction\n\nOPERATORS = [\n    {'code': '01', 'surname': 'D\\\\'Andrea', 'name': 'Wanda'},\n    {'code': '02', 'surname': 'Matei', 'name': 'Andreea'},\n    {'code': '03', 'surname': 'Cavalieri', 'name': 'Giorgia'},\n    {'code': '04', 'surname': 'Palmucci', 'name': 'Martina'}\n]\n\n\nclass Command(BaseCommand):\n\n    @transaction.commit_on_success\n    def handle(self, *args, **options):\n        for operator in OPERATORS:\n            newoperator = Operator.objects.create(**operator)\n            self.stdout.write(\"Created Operator %s\" % newoperator)\n        self.stdout.write(\"\\n\\nOPERATORS CREATED\")","repo_name":"strippo/bookingest","sub_path":"src/pdz_import/pdz_import/management/commands/create_operators.py","file_name":"create_operators.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
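One caveat with the management command above: every run inserts the operators again. A common Django pattern for idempotent seed commands is get_or_create; below is a sketch of an alternative loop body for handle() (it assumes 'code' is the natural key of Operator, which the original does not state):

        for operator in OPERATORS:
            # get_or_create only inserts when no Operator with this code exists yet
            newoperator, created = Operator.objects.get_or_create(
                code=operator['code'],
                defaults={'surname': operator['surname'], 'name': operator['name']},
            )
            if created:
                self.stdout.write("Created Operator %s" % newoperator)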
{"seq_id":"40710514614","text":"import numpy as np\nfrom scipy.spatial import Delaunay\n\nfrom ..geometry import find_cut_point\nfrom .TriangleMesh import TriangleMesh\nfrom .StructureQuadMesh import StructureQuadMesh\n\ndef adaptive(mesh, interface, hmax):\n    \"\"\"\n    @brief Generate an adaptive interface-fitted mesh\n    \"\"\"\n\n    mesh.bisect_interface_cell_with_curvature(interface, hmax)\n\n    NN = mesh.number_of_nodes()\n    node = mesh.entity('node')\n    cell = mesh.entity('cell')\n    edge = mesh.entity('edge')\n    edge2cell = mesh.ds.edge_to_cell()\n    phi = interface(node) # level-set values at the nodes\n    v = node[cell[:, 2]] - node[cell[:, 1]]\n    cellType = (np.abs(v[:, 0]) > 0.0) & (np.abs(v[:, 1]) > 0.0) # cell type flags, recomputed after bisection below\n    isInterfaceCell = mesh.mark_interface_cell(phi)\n    isInterfaceNode = np.zeros(NN, dtype=np.bool_)\n    isInterfaceNode[cell[isInterfaceCell]] = True\n\n    isEdge = edge2cell[:, 0] != edge2cell[:, 1]\n    isEdge = isEdge & ((isInterfaceNode[edge].sum(axis=1) > 0) |\n            (isInterfaceNode[cell[edge2cell[:, 0:2], 0]].sum(axis=1)> 0))\n    isEdge = isEdge & (edge2cell[:, 3:].sum(axis=1) == 0)\n\n    isEdge = isEdge & (~cellType[edge2cell[:, 0]])\n\n    NC = mesh.number_of_cells()\n    isMark = np.zeros(NC, dtype=np.bool_)\n    isMark[edge2cell[isEdge, 0:2]] = True\n\n    mesh.bisect(isMark)\n    node = mesh.entity('node')\n    phi = np.append(phi, interface(node[NN:]))\n    NN = mesh.number_of_nodes()\n\n    cell = mesh.entity('cell')\n    v = node[cell[:, 2]] - node[cell[:, 1]]\n    cellType = (np.abs(v[:, 0]) > 0.0) & (np.abs(v[:, 1]) > 0.0) # TODO: 0.0-->eps\n\n\n    # Step 4: move some interface nodes onto the interface\n\n    edge = mesh.entity('edge')\n    edge2cell = mesh.ds.edge_to_cell()\n\n    isInterfaceCell = mesh.mark_interface_cell(phi)\n    isInterfaceEdge = isInterfaceCell[edge2cell[:, 0:2]].sum(axis=1) == 2 \n\n    isSpecial0 = cellType[edge2cell[:, 0]] & (~cellType[edge2cell[:, 1]]) & (edge2cell[:, 2] == 0) & isInterfaceEdge\n    \n    isSpecial1 = (~cellType[edge2cell[:, 0]]) & cellType[edge2cell[:, 1]] & (edge2cell[:, 2] == 0) & isInterfaceEdge\n\n    isSpecial = isSpecial0 | isSpecial1\n\n    isShortEdge = (edge2cell[:, 2] != 0) & (edge2cell[:, 3] == 0) & (~isSpecial) \n    A = node[edge[isShortEdge, 0]]\n    B = node[edge[isShortEdge, 1]]\n    h = np.sqrt(np.sum((A - B)**2, axis=1))\n    M = find_cut_point(interface, A, B)\n    return M \n\n\ndef uniform_interface_fitted_mesh2d(interface, mesh):\n    \"\"\"\n    @brief Generate a 2D interface-fitted mesh on a Cartesian grid\n\n    @param mesh a structured quadrilateral mesh\n    \"\"\"\n\n    NN = mesh.number_of_nodes()\n    NC = mesh.number_of_cells()\n\n    node = mesh.entity('node')\n    edge = mesh.entity('edge')\n    cell = mesh.entity('cell')\n    edge2cell = mesh.ds.edge_to_cell()\n\n    if callable(interface):\n        phi = interface(node)\n    else:\n        phi = interface\n    sphi = msign(phi)\n\n    isCutEdge = sphi[edge[:, 0]]*sphi[edge[:, 1]] < 0\n    isCutCell = np.zeros(NC, dtype=np.bool_)\n    isCutCell[edge2cell[isCutEdge, 0:2]] = True\n    flag = (np.sum(np.abs(sphi[cell]), axis=1) < 3)\n    isCutCell[flag] = True\n\n    A = node[edge[isCutEdge, 0]]\n    B = node[edge[isCutEdge, 1]]\n\n    if callable(interface):\n        cnode = find_cut_point(interface, A, B)\n    else:\n        phiA = np.abs(phi[edge[isCutEdge, 0]])\n        phiB = np.abs(phi[edge[isCutEdge, 1]])\n        l = phiA + phiB\n        l0 = phiB/l\n        l1 = phiA/l\n        cnode = l0[:, None]*A + l1[:, None]*B\n\n    node = np.r_['0', node, cnode]\n\n    NCN = len(cnode) \n    phi = np.append(phi, np.zeros(NCN))\n    sphi = np.append(sphi, np.zeros(NCN))\n\n    isSpecialCell = (np.sum(np.abs(sphi[cell]), axis=1) == 2) & (np.sum(sphi[cell], axis=1) == 0)\n\n    scell = cell[isSpecialCell]\n    anode = node[scell].sum(axis=1)/4\n\n    if callable(interface):\n        anode = interface.project(anode)\n\n    NAN = len(anode)\n    node = np.r_['0', node, anode]\n\n    phi = np.append(phi, np.zeros(NAN))\n    sphi = np.append(sphi, np.zeros(NAN))\n\n    isInterfaceNode = np.zeros(NN+NCN+NAN, dtype=np.bool_)\n\n    isInterfaceNode[cell[isCutCell]] = True\n    isInterfaceNode[NN:] = True\n\n    inode = node[isInterfaceNode]\n    idxMap, = np.nonzero(isInterfaceNode)\n    t = Delaunay(inode)\n    tcell = t.simplices\n\n    NI = len(inode) - NCN - NAN\n    isUnnecessaryCell = (np.sum(tcell < NI, axis=1) == 3)\n    tcell = idxMap[tcell[~isUnnecessaryCell, :]]\n\n    scell = cell[~isCutCell]\n\n    cell = np.r_['0', tcell, scell[:, [1, 2, 0]], scell[:, [3, 0, 2]]]\n\n    return TriangleMesh(node, cell)\n
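\n# A minimal sketch of what a cut-point routine like the imported `find_cut_point`\n# may look like: bisection on the level set between endpoint arrays A and B whose\n# level-set values have opposite signs. This is an illustrative stand-in, not the\n# implementation imported from ..geometry.\ndef find_cut_point_bisection(interface, A, B, maxit=60):\n    A = np.array(A, dtype=np.float64)\n    B = np.array(B, dtype=np.float64)\n    for _ in range(maxit):\n        M = (A + B)/2\n        isLeft = interface(A)*interface(M) < 0 # sign change in [A, M]?\n        B[isLeft] = M[isLeft]\n        A[~isLeft] = M[~isLeft]\n    return (A + B)/2\n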
\n\ndef msign(x):\n    flag = np.sign(x)\n    flag[np.abs(x) < 1e-8] = 0\n    return flag\n\n\n\ndef datastructure(cell):\n    NC = cell.shape[0] # number of cells\n    localEdge = np.array([(1, 2), (2, 0), (0, 1)])\n    totalEdge = cell[:, localEdge].reshape(-1, 2)\n    _, i0, j = np.unique(np.sort(totalEdge, axis=-1),\n            return_index=True,\n            return_inverse=True,\n            axis=0)\n    NE = i0.shape[0]\n    edge2cell = np.zeros((NE, 4), dtype=cell.dtype)\n\n    i1 = np.zeros(NE, dtype=cell.dtype)\n    i1[j] = np.arange(3*NC)\n\n    edge2cell[:, 0] = i0//3\n    edge2cell[:, 1] = i1//3\n    edge2cell[:, 2] = i0%3\n    edge2cell[:, 3] = i1%3\n\n    edge = totalEdge[i0, :]\n\n    return edge, edge2cell\n    \n    \n\n\n","repo_name":"weihuayi/fealpy","sub_path":"fealpy/mesh/backup/InterfaceMesher.py","file_name":"InterfaceMesher.py","file_ext":"py","file_size_in_byte":4937,"program_lang":"python","lang":"en","doc_type":"code","stars":209,"dataset":"github-code","pt":"37"}
{"seq_id":"14875111871","text":"import asyncio\nimport io\nimport json\nfrom dataclasses import dataclass\nimport logging\nfrom typing import Any, Dict, List, Optional\n\nfrom fastapi import FastAPI, HTTPException, APIRouter, UploadFile, Form, File, Body, Depends\nfrom fastapi.responses import StreamingResponse\nfrom starlette.responses import FileResponse, PlainTextResponse, HTMLResponse\nfrom pydantic import BaseModel, Field\nfrom ..model_pool import ModelPool, MODEL_POOL\n\n\nrouter = APIRouter()\n\n\nclass ChatCompletionResponse(BaseModel):\n    choices: List[dict]\n\n\nclass ChatCompletionRequest(BaseModel):\n    model: str\n    messages: List[dict]\n    stream: bool = Field(False)\n    # NOTE: The following default values will not actually take effect since we exclude unset values in conversion.\n    # A field not specified by the user will get the default value defined in model.generate or model.stream_generate.\n    top_p: float = Field(1.0)\n    temperature: float = Field(1.0)\n    max_tokens: int = Field(512)\n    kwargs: Dict[str, Any] = Field({})\n\n    @classmethod\n    def __get_validators__(cls):\n        yield cls._validate_from_json_string\n\n    @classmethod\n    def _validate_from_json_string(cls, value):\n        if isinstance(value, str):\n            return cls.validate(json.loads(value.encode()))\n        return cls.validate(value)\n\n\n@router.post(\"/v1/mmchat/completions\")\nasync def chat_completion(\n    files: List[UploadFile] = File([]),\n    data: ChatCompletionRequest = Body(...),\n):\n    try:\n        model = await MODEL_POOL.acquire(data.model)\n    except KeyError as e:\n        raise HTTPException(status_code=404, detail=f\"model not found: {str(e)}\")\n\n    if files is None:\n        files = {}\n    else:\n        files = {f.filename: (await f.read(), f.content_type) for f in files}\n    # logging.info(files.keys())\n\n    if data.stream:\n        async def stream_generate():\n            async for choices in model.generate(data.dict(exclude_unset=True), files):\n                resp = ChatCompletionResponse(choices=choices)\n                # logging.debug(resp)\n                yield json.dumps(resp.dict()) + '\\\\n'\n        return StreamingResponse(stream_generate(), 
media_type='text/event-stream')\n else:\n choices = [pred async for pred in model.generate(data.dict(exclude_unset=True), {})]\n assert len(choices) == 1\n choices = choices[0]\n # logging.debug(choices)\n return ChatCompletionResponse(choices=choices).dict()\n","repo_name":"XiPotatonium/chatbot-api","sub_path":"src/api/chat_completion.py","file_name":"chat_completion.py","file_ext":"py","file_size_in_byte":3367,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"2622334029","text":"import csv\nimport sys\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn import svm\nfrom sklearn.linear_model import Perceptron\n\nAImodel = KNeighborsClassifier(n_neighbors=1)\n# AImodel = svm.SVC()\n# AImodel = Perceptron()\n\n\nTEST_SIZE = 0.4\n\n\ndef main():\n\n # Check command-line arguments\n if len(sys.argv) != 2:\n sys.exit(\"Usage: python shopping.py data\")\n\n # Load data from spreadsheet and split into train and test sets\n evidence, labels = load_data(sys.argv[1])\n X_train, X_test, y_train, y_test = train_test_split(\n evidence, labels, test_size=TEST_SIZE\n )\n\n # Train model and make predictions\n model = train_model(X_train, y_train)\n predictions = model.predict(X_test)\n sensitivity, specificity = evaluate(y_test, predictions)\n\n # Print results\n print(f\"Correct: {(y_test == predictions).sum()}\")\n print(f\"Incorrect: {(y_test != predictions).sum()}\")\n print(f\"True Positive Rate: {100 * sensitivity:.2f}%\")\n print(f\"True Negative Rate: {100 * specificity:.2f}%\")\n\n\ndef load_data(filename):\n \"\"\"\n Load shopping data from a CSV file `filename` and convert into a list of\n evidence lists and a list of labels. 
Return a tuple (evidence, labels).\n\n evidence should be a list of lists, where each list contains the\n following values, in order:\n - Administrative, an integer\n - Administrative_Duration, a floating point number\n - Informational, an integer\n - Informational_Duration, a floating point number\n - ProductRelated, an integer\n - ProductRelated_Duration, a floating point number\n - BounceRates, a floating point number\n - ExitRates, a floating point number\n - PageValues, a floating point number\n - SpecialDay, a floating point number\n - Month, an index from 0 (January) to 11 (December)\n - OperatingSystems, an integer\n - Browser, an integer\n - Region, an integer\n - TrafficType, an integer\n - VisitorType, an integer 0 (not returning) or 1 (returning)\n - Weekend, an integer 0 (if false) or 1 (if true)\n\n labels should be the corresponding list of labels, where each label\n is 1 if Revenue is true, and 0 otherwise.\n \"\"\"\n with open(filename) as f:\n reader = csv.reader(f)\n next(reader)\n\n evidence = []\n labels = []\n for row in reader:\n evidenceRow = []\n evidenceRow = row[:17]\n for i in range(len(evidenceRow)):\n if i == 0 or i == 2 or i == 4 or i == 11 or i == 12 or i == 13 or i == 14:\n evidenceRow[i] = int(evidenceRow[i])\n elif i == 10:\n if evidenceRow[i] == 'Jan':\n evidenceRow[i] = 0\n elif evidenceRow[i] == 'Feb':\n evidenceRow[i] = 1\n elif evidenceRow[i] == 'Mar':\n evidenceRow[i] = 2\n elif evidenceRow[i] == 'Apr':\n evidenceRow[i] = 3\n elif evidenceRow[i] == 'May':\n evidenceRow[i] = 4\n elif evidenceRow[i] == 'June':\n evidenceRow[i] = 5\n elif evidenceRow[i] == 'Jul':\n evidenceRow[i] = 6\n elif evidenceRow[i] == 'Aug':\n evidenceRow[i] = 7\n elif evidenceRow[i] == 'Sep':\n evidenceRow[i] = 8\n elif evidenceRow[i] == 'Oct':\n evidenceRow[i] = 9\n elif evidenceRow[i] == 'Nov':\n evidenceRow[i] = 10\n elif evidenceRow[i] == 'Dec':\n evidenceRow[i] = 11\n\n elif i == 15:\n if evidenceRow[i] == 'Returning_Visitor':\n evidenceRow[i] = 1\n else:\n evidenceRow[i] = 0\n\n elif i == 16:\n if evidenceRow[i] == 'TRUE':\n evidenceRow[i] = 1\n else:\n evidenceRow[i] = 0\n else:\n evidenceRow[i] = float(evidenceRow[i])\n\n\n evidence.append(evidenceRow)\n\n if row[-1] == 'TRUE':\n labels.append(1)\n else:\n labels.append(0)\n return (evidence, labels)\n\ndef train_model(evidence, labels):\n \"\"\"\n Given a list of evidence lists and a list of labels, return a\n fitted k-nearest neighbor model (k=1) trained on the data.\n \"\"\"\n # print(evidence[0:10])\n # print(labels[0:10])\n return AImodel.fit(evidence, labels)\n\n\ndef evaluate(labels, predictions):\n \"\"\"\n Given a list of actual labels and a list of predicted labels,\n return a tuple (sensitivity, specificity).\n\n Assume each label is either a 1 (positive) or 0 (negative).\n\n `sensitivity` should be a floating-point value from 0 to 1\n representing the \"true positive rate\": the proportion of\n actual positive labels that were accurately identified.\n\n `specificity` should be a floating-point value from 0 to 1\n representing the \"true negative rate\": the proportion of\n actual negative labels that were accurately identified.\n \"\"\"\n # print(labels[0:10])\n # print(predictions[0:10])\n\n countPositive = 0\n countSensitivity = 0\n countNegative = 0\n countSpecificity = 0\n for i in range(len(labels)):\n if labels[i] == 1:\n countPositive += 1\n if predictions[i] == 1:\n countSensitivity += 1\n else:\n countNegative += 1\n if predictions[i] == 0:\n countSpecificity += 1\n Sensitivity = 
countSensitivity/countPositive\n Specificity = countSpecificity/countNegative\n\n return (Sensitivity, Specificity)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"TomasMos/shopping","sub_path":"shopping.py","file_name":"shopping.py","file_ext":"py","file_size_in_byte":6016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11213814737","text":"import random\r\nMAX_HEALTH = 100\r\nMIN_POWER = 10\r\nMAX_POWER = 20\r\nplayer_health = MAX_HEALTH\r\nplayer_name = input(\"Enter your name: \")\r\ncomputer_health = MAX_HEALTH\r\ncomputer_name = \"Computer\"\r\nwhile True:\r\n print(f\"{player_name}'s turn\")\r\n print(f\"{player_name}'s health: {player_health}\")\r\n print(f\"{computer_name}'s health: {computer_health}\")\r\n player_input = input(f\"{player_name}, do you want to attack or defend? \")\r\n\r\n if player_input.lower() == \"attack\":\r\n player_power = random.randint(MIN_POWER, MAX_POWER)\r\n computer_health -= player_power\r\n print(f\"{player_name} attacks for {player_power} damage!\")\r\n elif player_input.lower() == \"defend\":\r\n print(f\"{player_name} defends.\")\r\n else:\r\n print(\"Invalid input. Try again.\")\r\n continue\r\n if computer_health <= 0:\r\n print(f\"{player_name} wins!\")\r\n break\r\n print(f\"{computer_name}'s turn\")\r\n print(f\"{player_name}'s health: {player_health}\")\r\n print(f\"{computer_name}'s health: {computer_health}\")\r\n computer_input = random.choice([\"attack\", \"defend\"])\r\n if computer_input == \"attack\":\r\n computer_power = random.randint(MIN_POWER, MAX_POWER)\r\n player_health -= computer_power\r\n print(f\"{computer_name} attacks for {computer_power} damage!\")\r\n else:\r\n print(f\"{computer_name} defends.\")\r\n if player_health <= 0:\r\n print(f\"{computer_name} wins!\")\r\n break\r\n","repo_name":"Abdiyo178/Text-Boxing-Game","sub_path":"TheBoxingGame.py","file_name":"TheBoxingGame.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6745713981","text":"import argparse\nimport utils.paths\nimport thesis.dataset\nimport torch.utils.data\nimport matplotlib.pyplot as plt\nimport thesis_inpainting.runner\nimport models.vgg_16\nimport models.thesis_alignment\nimport models.thesis_inpainting\nimport os.path\n\nparser = argparse.ArgumentParser(description='Cleans invalid images')\nparser.add_argument('--data-path', required=True, help='Path where the images are stored')\nparser.add_argument('--experiments-path', required=True, help='Path where the experiments are stored')\nparser.add_argument('--results-path', required=True, help='Path where the results should be stored')\nparser.add_argument('--device', default='cpu', help='Device to use')\nargs = parser.parse_args()\n\n# Prepare the dataset\ndata_meta = utils.paths.DatasetPaths.get_items('got-10k', args.data_path, 'validation')\ndataset = thesis.dataset.MaskedSequenceDataset(\n gts_dataset=thesis.dataset.ContentProvider(args.data_path, data_meta, None), masks_dataset=None,\n gts_simulator=None, masks_simulator=None,\n image_size=(240, 480), frames_n=-1, frames_spacing=1, frames_randomize=False,\n dilatation_filter_size=(3, 3), dilatation_iterations=0,\n force_resize=True, keep_ratio=False\n)\n\n# Load the models\nmodel_vgg = models.vgg_16.get_pretrained_model(args.device)\nmodel_alignment = models.thesis_alignment.ThesisAlignmentModel(model_vgg).to(args.device)\nmodel = 
models.thesis_inpainting.ThesisInpaintingVisible().to(args.device)\n\n# Load aligner checkpoint\nexperiment_path = os.path.join(args.experiments_path, 'align_double')\ncheckpoint_path = os.path.join(experiment_path, 'checkpoints', '{}.checkpoint.pkl'.format(64))\nwith open(checkpoint_path, 'rb') as checkpoint_file:\n model_alignment.load_state_dict(torch.load(checkpoint_file, map_location=args.device)['model'])\n\n# Load inpainting checkpoint\nexperiment_path = os.path.join(args.experiments_path, 'inpaint_double')\ncheckpoint_path = os.path.join(experiment_path, 'checkpoints', '{}.checkpoint.pkl'.format(136))\nwith open(checkpoint_path, 'rb') as checkpoint_file:\n model.load_state_dict(torch.load(checkpoint_file, map_location=args.device)['model'])\n\n# Iterate over the data\nfor it_data in dataset:\n (x, m), y, info = it_data\n x, m, y = x.to(args.device), m.to(args.device), y.to(args.device)\n y_inpainted = thesis_inpainting.runner.ThesisInpaintingRunner.inpainting_algorithm_ff(\n x, m, y, model_alignment, model\n )\n frames_to_video = utils.FramesToVideo(0, 10, None)\n frames_to_video.add_sequence(y_inpainted.cpu().numpy().transpose(1, 2, 3, 0) * 255)\n frames_to_video.save(args.results_path, info[0])\n print('Video saved')","repo_name":"AlomdaElmasry/master_thesis","sub_path":"scripts/inpaint.py","file_name":"inpaint.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28459711243","text":"from requests_html import HTMLSession\nfrom fake_useragent import UserAgent\nimport urllib3\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n\nclass Twitter:\n base_url = 'https://twitter.com'\n api_base_url = 'https://api.twitter.com'\n mobile_base_url = 'https://mobile.twitter.com'\n user_agent = UserAgent()\n session = HTMLSession()\n headers = {\n 'Accept': 'application/json, text/javascript, */*; q=0.01',\n 'Referer': base_url,\n 'User-Agent': user_agent.opera,\n 'X-Twitter-Active-User': 'yes',\n }\n search_params = {\n 'vertical': 'default',\n 'src': 'unkn',\n 'include_available_features': 1,\n 'include_entities': 1,\n 'max_position': None,\n 'reset_error_state': 'false',\n 'lang': 'en',\n 'f': 'tweets',\n }\n\n @classmethod\n def get_page(cls, url, params=None):\n response = cls.session.get(url=url, params=params, headers=cls.headers, verify=False)\n return response\n\n @classmethod\n def get_json_page(cls, url, params=None):\n headers = cls.headers\n headers['X-Requested-With'] = 'XMLHttpRequest'\n response = cls.session.get(url=url, params=params, headers=headers, verify=False)\n return response.json()","repo_name":"threatlead/tweetparse","sub_path":"tweetparse/twitter.py","file_name":"twitter.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"40276179310","text":"from pyini_parser.configure.parser import ConfigParser\n\nconfig = ConfigParser()\n\nconfig[\"deployment\"] = {\n \"domain_name\": \"www.example.com\",\n \"secret_key\": \"!@#$#$#@!!\",\n}\n\nconfig[\"database\"] = {\n \"host\": \"localhost\",\n \"port\": \"3306\",\n \"user\": \"root\",\n}\nconfig[\"email\"] = {\n \"host\": \"smtp.gmail.com\",\n \"port\": \"587\",\n}\nconfig[\"devolvement\"] = {\n \"api_key\": \"!@#$%^&*()_+\",\n}\nstring_content = \"\"\"\n [deployment]\n domain_name=www.example.com\n secret_key=!@#$#$#@!!\n [devolvement]\n api_key=!@#$%^&*()_+\n\"\"\"\n\nwith open(\"example.ini\", \"w\") 
as f:\n config.write(f) # Check example.ini file contents\n print(config.read(f)) # Read example.ini file contents | you must have file example.ini in the same directory\n # config.sections() # Return a list of sections\n # config.get(\"deployment\", \"domain_name\") # Get the value of a key in a section\n # config.append('devolvement', {'password':'#%$%80@#$36415'}) # Append a new key/value pair to section 'newsection'\n # config.read_from_string(string_content)\n\n'''\n# Uncomment the following lines to test the read method if you have a file\nwith open(\"example.ini\", \"r\") as f:\n config.read_from_string(string_content) # you can use this method to check if everything working well\n config.get(\"section\", \"key\") # Get the value of a key in a section\n'''","repo_name":"Mahmoud-Emad/Python-ini-configuration-parser","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"4236664737","text":"import ffcv.fields.decoders as decoders\nfrom ffcv.transforms import RandomHorizontalFlip, Cutout, RandomTranslate\nimport torchvision\nfrom PIL import Image\nimport numpy as np\n\n# These take in the desired image size, and the beton image size\nIMAGE_DECODERS = {\n 'simple': lambda imgsz: decoders.SimpleRGBImageDecoder(),\n 'resized_crop': lambda imgsz: decoders.ResizedCropRGBImageDecoder((imgsz, imgsz)),\n 'random_resized_crop': lambda imgsz: decoders.RandomResizedCropRGBImageDecoder((imgsz, imgsz)),\n 'center_crop_256': lambda imgsz: decoders.CenterCropRGBImageDecoder((imgsz, imgsz), 224/256),\n 'center_crop_75': lambda imgsz: decoders.CenterCropRGBImageDecoder((imgsz, imgsz), 64/75),\n 'center_crop_full': lambda imgsz: decoders.CenterCropRGBImageDecoder((imgsz, imgsz), 1),\n}\n\ndef cifar_train_aug(hparams):\n mean = hparams['mean']\n return [\n RandomHorizontalFlip(),\n RandomTranslate(padding=2, fill=tuple(map(int, mean))),\n Cutout(4, tuple(map(int, mean))),\n ]\n\ndef imagenet_train_aug(hparams):\n return [RandomHorizontalFlip()]\n\n\nclass PyCutOut:\n def __init__(self, crop_size, fill):\n self.crop_size = crop_size\n self.fill = fill\n \n def __call__(self, sample):\n sample = np.array(sample)\n crop_size = self.crop_size\n H, W = sample.shape[0], sample.shape[1]\n coord = (\n np.random.randint(H - crop_size + 1),\n np.random.randint(W - crop_size + 1),\n )\n sample[coord[0]:coord[0]+crop_size, coord[1]:coord[1]+crop_size] = self.fill\n return Image.fromarray(sample.astype(np.uint8))\n \nclass PyTranslate:\n def __init__(self, padding):\n self.padding = padding\n \n def __call__(self, sample):\n sample = np.array(sample)\n pad = self.padding\n h, w, c = sample.shape\n dst = np.zeros((h+2*pad, w+2*pad, c)).astype(np.uint8)\n dst[pad:pad+h, pad:pad+w] = sample\n y_coord = np.random.randint(low=0, high=2*pad+1)\n x_coord = np.random.randint(low=0, high=2*pad+1)\n return dst[y_coord:y_coord+h, x_coord:x_coord+w]\n\nIMAGE_AUGS = {\n 'cifar_train_aug': cifar_train_aug,\n 'imagenet_train_aug': imagenet_train_aug, \n}","repo_name":"MadryLab/failure-directions","sub_path":"failure_directions/src/decoders_and_transforms.py","file_name":"decoders_and_transforms.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"37"} +{"seq_id":"10080525649","text":"import argparse\nimport functools\nimport logging\nimport os\n\nimport torch\n\nfrom direct.common.subsample import 
build_masking_function\nfrom direct.inference import build_inference_transforms, setup_inference_save_to_h5\nfrom direct.launch import launch\nfrom direct.utils import set_all_seeds\n\nlogger = logging.getLogger(__name__)\n\n\ndef _get_transforms(env):\n    dataset_cfg = env.cfg.inference.dataset\n    mask_func = build_masking_function(**dataset_cfg.transforms.masking)\n    transforms = build_inference_transforms(env, mask_func, dataset_cfg)\n    return dataset_cfg, transforms\n\n\nsetup_inference_save_to_h5 = functools.partial(\n    setup_inference_save_to_h5,\n    functools.partial(_get_transforms),\n)\n\n\ndef predict_from_argparse(args: argparse.Namespace):\n    # This sets MKL threads to 1.\n    # DataLoader can otherwise bring a lot of difficulties when computing CPU FFTs in the transforms.\n    torch.set_num_threads(1)\n    os.environ[\"OMP_NUM_THREADS\"] = \"1\"\n\n    set_all_seeds(args.seed)\n    experiment_directory = (\n        args.experiment_directory if args.experiment_directory is not None else args.output_directory\n    )\n\n    launch(\n        setup_inference_save_to_h5,\n        args.num_machines,\n        args.num_gpus,\n        args.machine_rank,\n        args.dist_url,\n        args.name,\n        args.data_root,\n        experiment_directory,\n        args.output_directory,\n        args.filenames_filter,\n        args.checkpoint,\n        args.device,\n        args.num_workers,\n        args.machine_rank,\n        args.cfg_file,\n        None,\n        args.mixed_precision,\n        args.debug,\n        False,\n    )\n","repo_name":"NKI-AI/direct","sub_path":"direct/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","stars":193,"dataset":"github-code","pt":"37"}
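The module above pre-binds the transform builder into setup_inference_save_to_h5 via functools.partial. A standalone illustration of that pattern (generic names, not the direct API):

import functools

def run(get_transforms, name):
    # The builder is supplied ahead of time; callers only pass `name`.
    return "{} for {}".format(get_transforms(), name)

run_with_transforms = functools.partial(run, lambda: "transforms")
print(run_with_transforms("demo"))  # -> transforms for demo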
{"seq_id":"6374052661","text":"# Run redmonster on a cluster using multiprocessing\n#\n# Tim Hutchinson, University of Utah, August 2014\n# t.hutchinson@utah.edu\n\nfrom time import gmtime, strftime\n\nimport numpy as n\nimport matplotlib.pyplot as p\np.interactive(True)\nimport multiprocessing as mp\nfrom astropy.io import fits\n\nfrom redmonster.sandbox import yanny as y\nfrom redmonster.datamgr import spec, io\nfrom redmonster.physics import zfinder, zfitter, zpicker\n\n\ndef parallel_rm(plate_info):\n    (plate, mjd, fiberid) = plate_info\n    specs = spec.Spec(plate=plate, mjd=mjd, fiberid=fiberid)\n    zssp = zfinder.ZFinder(fname='ndArch-ssp_em_galaxy-v000.fits', npoly=4, zmin=-0.01, zmax=1.2)\n    zssp.zchi2(specs.flux, specs.loglambda, specs.ivar)\n    # Write chi2 file with zbase\n    prihdu = fits.PrimaryHDU(zssp.zchi2arr)\n    col1 = fits.Column(name='ZBASE', format='E', array=zssp.zbase)\n    cols = fits.ColDefs([col1])\n    tbhdu = fits.BinTableHDU.from_columns(cols)\n    thdulist = fits.HDUList([prihdu,tbhdu])\n    thdulist.writeto('chi2arr-%s-%s.fits' % (plate, zssp.type), overwrite=True)\n    # ----\n    #zstar = zfinder.ZFinder(fname='ndArch-spEigenStar-55734.fits', npoly=4, zmin=-.005, zmax=.005)\n    #zstar.zchi2(specs.flux, specs.loglambda, specs.ivar)\n    #zfit_ssp = zfitter.ZFitter(zssp.zchi2arr, zssp.zbase)\n    #zfit_ssp.z_refine(threshold=this_thresh)\n    #zfit_star = zfitter.ZFitter(zstar.zchi2arr, zstar.zbase)\n    #zfit_star.z_refine(threshold=this_thresh)\n    #zpick = zpicker.ZPicker(specs, zssp, zfit_ssp, zstar, zfit_star)\n    #ssp_flags = n.zeros(len(fiberid))\n    #star_flags = n.zeros(len(fiberid))\n    #for ifiber in xrange(len(fiberid)):\n    #    ssp_flags[ifiber] = (int(specs.zwarning[ifiber]) | int(zssp.zwarning[ifiber])) | int(zfit_ssp.zwarning[ifiber])\n    #    star_flags[ifiber] = (int(specs.zwarning[ifiber]) | int(zstar.zwarning[ifiber])) | int(zfit_star.zwarning[ifiber])\n    ## Write flags file\n    #prihdu = fits.PrimaryHDU(ssp_flags)\n    #thdulist = fits.HDUList([prihdu])\n    #thdulist.writeto('flags-%s-%s.fits' % (plate, zssp.type), overwrite=True)\n    #output = io.WriteRedmonster(zpick, dest='~/scratch', overwrite=True)\n\n# Read yanny file\nx = y.yanny(filename='spInspect_alltest_bolton.par.txt', np=True)\n\n# Get fibers, zpipe, zperson for each plate\nplates = [3686, 3687, 3804, 3805, 3853, 3855, 3856, 3860]\nfibers = {}\nzpipe = {}\nzperson = {}\nfor plate in plates:\n    args = n.where(x['BOSSOBJECT']['plate'] == plate)[0]\n    fibers[plate] = [x['BOSSOBJECT'][i][2] for i in args]\n    zpipe[plate] = [x['BOSSOBJECT'][i][5] for i in args]\n    zperson[plate] = [x['BOSSOBJECT'][i][6] for i in args]\n\n\n\nargs1 = [(3686, 55268, fibers[3686]), (3687, 55269, fibers[3687]),\n        (3804, 55267, fibers[3804]), (3805, 55269, fibers[3805]),\n        (3853, 55268, fibers[3853]), (3855, 55268, fibers[3855]),\n        (3856, 55269, fibers[3856]), (3860, 55269, fibers[3860])]\n\n\n\nnum_proc = 8\npool = mp.Pool(num_proc)\nresult = pool.map(parallel_rm, args1)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"timahutchinson/redmonster","sub_path":"python/redmonster/sandbox/par_rm.py","file_name":"par_rm.py","file_ext":"py","file_size_in_byte":4656,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"}
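A small robustness note on the pool usage at the end of the script above: using multiprocessing.Pool as a context manager (supported since Python 3.3) ensures the workers are closed and joined, and a __main__ guard avoids re-import problems on spawn-based platforms. A sketch reusing the script's own names (mp, num_proc, parallel_rm, args1):

if __name__ == '__main__':
    with mp.Pool(num_proc) as pool:  # pool is cleaned up on exit
        result = pool.map(parallel_rm, args1)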
\"cython\",\n \"h5py\",\n \"zarr>=2.4.0\",\n \"joblib\",\n \"lazy-property\",\n \"scikit-image\",\n \"matplotlib\",\n \"memory_profiler\",\n \"more-itertools\",\n \"pymongo\",\n \"scikit-learn\",\n \"SimpleITK\",\n \"tabulate\",\n \"corditea @ git+https://github.com/saalfeldlab/corditea\",\n \"cremi @ git+https://github.com/cremi/cremi_python@python3\",\n \"gunpowder @ git+https://github.com/neptunes5thmoon/gunpowder@dist_transform_py3\",\n \"fuse @ git+https://github.com/neptunes5thmoon/fuse@my_pipinstallable_version\",\n \"neptunes5thmoon-simpleference @ git+https://github.com/neptunes5thmoon/simpleference@master\",\n]\n\nEXTRAS = {\n \"synapse_postprocessing\": [\"luigi\"],\n \"malis_loss\": [\"malis @ git+https://github.com/neptunes5thmoon/malis@fix_setup\"],\n \"napari\": [\"napari\"],\n \"dev\": [\"pytest\", \"jupyter\", \"black\"],\n \"tf\": \"tensorflow_gpu<1.15\",\n}\n\nDEPENDENCY_LINKS = [\n \"git+https://github.com/saalfeldlab/corditea@main#egg=corditea\",\n \"git+https://github.com/cremi/cremi_python.git@python3#egg=cremi\",\n \"git+https://github.com/neptunes5thmoon/gunpowder.git@dist_transform_py3#egg=gunpowder\",\n \"git+https://github.com/neptunes5thmoon/fuse.git@my_pipinstallable_version#egg=fuse\",\n \"git+https://github.com/neptunes5thmoon/malis.git@fix_setup#egg=malis\",\n \"git+https://github.com/neptunes5thmoon/simpleference.git@master#egg=simpleference[zarr]\"\n \"git+https://github.com/neptunes5thmoon/simpleference.git@master#egg=neptunes5thmoon-simpleference\",\n]\n\nhere = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(here, \"README.md\"), \"r\") as f:\n LONG_DESCRIPTION = \"\\n\" + f.read()\nwith open(os.path.join(here, \"CNNectome\", \"VERSION\"), \"r\") as version_file:\n VERSION = version_file.read().strip()\n\nsetup(\n name=NAME,\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n author=AUTHOR,\n author_email=EMAIL,\n python_requires=REQUIRES_PYTHON,\n url=URL,\n packages=find_packages(),\n entry_points={\n \"console_scripts\": [\n \"add_missing_n5_attributes = CNNectome.utils.add_missing_n5_attributes:main\",\n \"auto_evaluation = CNNectome.validation.organelles.auto_evaluation:main\",\n \"init_CNNectome_config = CNNectome.utils.config_loader:get_config\",\n \"check_inference_complete = CNNectome.inference.check_inference_complete:main\",\n \"unet_inference = CNNectome.inference.unet_inference:main\",\n ],\n },\n install_requires=REQUIRED,\n extras_require=EXTRAS,\n dependency_links=DEPENDENCY_LINKS,\n package_data={\"CNNectome\": [\"etc/config_local.ini\", \"VERSION\"]},\n include_package_data=True,\n license=\"BSD-2-Clause\",\n classifiers=[\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n)\n","repo_name":"saalfeldlab/CNNectome","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3469,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"22034404143","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n#-----------------------------------------------------------------------\ndef JulianDay(GregDay, **kwargs):\n\t'''\n\tFunction to convert the Gregorian date to Julian Day Number(JDN)\n\t\n\tReference: https://en.wikipedia.org/wiki/Julian_day\n\n\tParameters\n\t----------\n\tGregDay\t: string\n\t\t\t format - yyyymmdd\n\t\t\t \n\tOptional 
{"seq_id":"22034404143","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n#-----------------------------------------------------------------------\ndef JulianDay(GregDay, **kwargs):\n\t'''\n\tFunction to convert a Gregorian date to the Julian Day Number (JDN)\n\t\n\tReference: https://en.wikipedia.org/wiki/Julian_day\n\n\tParameters\n\t----------\n\tGregDay\t: string\n\t\t\t  format - yyyymmdd\n\t\t\t  \n\tOptional Parameters\n\t----------\n\touttype : string, specify the output type, int, string, or nasa format\n\ttype\t: string\n\t\t\t  global - output is the global Julian Day Number since November 23, −4713\n\t\t\t  local - output is localized to that year\n\tReturn\n\t---------- \n\tjdn\t\t: int or str, default type int\n\t\t\t  Julian Day Number\n\t'''\n\touttype = kwargs.get('outtype', 'int')\n\ttype = kwargs.get('type', 'global')\n\n\n\tyear = int(GregDay[0:4])\n\tmonth = int(GregDay[4:6])\n\tday = int(GregDay[6:8])\n\tJulian_a = (14-month)//12\n\tJulian_y = year + 4800 - Julian_a\n\tJulian_m = month + 12 * Julian_a - 3\n\n\n\tjdn = day + (153*Julian_m+2)//5 + 365*Julian_y + Julian_y//4 - Julian_y//100 + Julian_y//400 -32045\n\n\n\tif type == 'local':\n\t\tjdn_base = JulianDay( str(year) + '0101', type = 'global' )\n\t\tjdn = jdn - jdn_base + 1\n\n\tif outtype == 'str':\n\t\tjdn = str(jdn)\n\n\tif outtype == 'nasa':\n\t\tjdn_base = JulianDay( str(year) + '0101', type = 'global' )\n\t\tjdn = jdn - jdn_base + 1\n\t\tjdn = str(jdn)\n\t\twhile len(jdn) < 3:\n\t\t\tjdn = '0' + jdn\n\t\tjdn = 'A' + GregDay[0:4] + jdn\n\t\n\treturn jdn\n\n#-----------------------------------------------------------------------\ndef GregorianDay(jdn, outputformat = 'yyyy-mm-dd', **kwargs):\n    '''\n    Function to convert the Julian Day Number (JDN) to a Gregorian date\n    http://aa.usno.navy.mil/faq/docs/JD_Formula.php\n\n\tParameters\n\t----------\n\tjdn\t\t\t\t: int, Julian Day Number\n\toutputformat\t: str, output date format, e.g. 'yyyy-mm-dd'\n\n\tReturn\n\t---------- \n\tGregDay\t\t\t: Gregorian date\n\t\n    '''\n    \n    noYear = kwargs.get('noYear', False)\n    l = jdn + 68569\n    n = 4*l//146097\n\n    l = l - (146097*n+3)//4\n    year = 4000*(l+1)//1461001\n    l = l - 1461*year//4 + 31\n    month = 80*l//2447\n    day = l - 2447*month//80\n    l = month//11\n    month = month + 2 - 12*l\n    year = 100*(n-49) + year + l\n\n    if day < 10:\n        day = '0' + str(day)\n    if month < 10:\n        month = '0' + str(month)\n    GregDay = outputformat\n    GregDay = GregDay.replace('dd',str(day))\n    GregDay = GregDay.replace('mm',str(month))\n    if noYear:\n        GregDay = GregDay.replace('yyyy-','')\n    else:\n        GregDay = GregDay.replace('yyyy',str(year))\n\n    return GregDay\n\n#-----------------------------------------------------------------------\ndef get_displatDate(date, format = 'mm-dd-yyyy', **kwargs):\n\n\tyear = date[1:5]\n\tyearlyJDN = date[5:]\t\n\tjdbBase = year + '0101'\n\tglobalJDN = JulianDay(jdbBase,outtype = 'int') + int(yearlyJDN) - 1\n\tdisplatDate = GregorianDay(globalJDN, outputformat = format, **kwargs)\n\t\n\treturn displatDate\n\t\n#-----------------------------------------------------------------------\ndef get_date_series(gregDayBeg, gregDayEnd, **kwargs):\n\n\timport numpy as np\n\touttype = kwargs.get('outtype', 'jdn')\n\toutputformat = kwargs.get('outputformat', 'yyyy-mm-dd')\n\t\n\tjdnBeg = JulianDay(gregDayBeg)\n\tjdnEnd = JulianDay(gregDayEnd) + 1\n\tjdns = np.arange(jdnBeg, jdnEnd)\n\t\n\tdateSeries = []\n\tif outtype == 'jdn':\n\t\tdateSeries = list( jdns )\n\t\t\n\tif outtype == 'greg':\n\t\tfor jdn in jdns:\n\t\t\tGregDay = GregorianDay(jdn, outputformat = outputformat)\n\t\t\tdateSeries.append(GregDay)\n\t\t\t\n\tif outtype == 'nasa':\n\t\tfor jdn in jdns:\n\t\t\tGregDay = GregorianDay(jdn, outputformat = 'yyyymmdd')\n\t\t\tdateSeries.append(JulianDay(GregDay, outtype = 'nasa'))\t\t\n\t\n\treturn dateSeries\n
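#-----------------------------------------------------------------------\n# Usage example added for illustration (the values follow from the standard\n# Julian Day formula; 2000-01-01 has Julian Day Number 2451545):\nif __name__ == '__main__':\n\tassert JulianDay('20000101') == 2451545\n\tassert GregorianDay(2451545) == '2000-01-01'\n\tassert JulianDay('20000101', outtype = 'nasa') == 'A2000001'\n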
#-----------------------------------------------------------------------\ndef get_month(dateBeg, dateEnd):\n\n\tmonthDic = { 1 : 'JAN', 2 : 'FEB', 3 : 'MAR',\n\t             4 : 'APR', 5 : 'MAY', 6 : 'JUN',\n\t             7 : 'JUL', 8 : 'AUG', 9 : 'SEP',\n\t             10 : 'OCT', 11 : 'NOV', 12 : 'DEC'}\n\t \t\n\tmonthBeg = dateBeg[4:6]\n\tmonthEnd = dateEnd[4:6]\n\t\n\tmonthBegInt = int(monthBeg)\n\tmonthEndInt = int(monthEnd)\n\t\n\tmonthStr = []\n\t\n\tfor id in range(monthBegInt, monthEndInt + 1):\n\t\tmonthStr.append(monthDic[id])\n\t\n\treturn monthStr\n\n#-----------------------------------------------------------------------\ndef cal_grid(tile, numCeil, proj):\n\t'''\n\tFunction to calculate the geographical coordinates for a given projection\n\tmethod\n\t\n\tParameters\n    ----------\n\t\ttile - str format, example: 'h07v05'\n\t\tnumCeil - number of cells in one tile\n\t\tproj : projection method, Sinusoidal and PlateCarree available\n\tReturn\n    ---------- \n\t\tgeographical coordinates of the tile\n\t'''\n\tif proj == 'Sinusoidal':\n\t\tlatitude, longitude = cal_sinu_grid(tile, numCeil)\n\tif proj == 'PlateCarree':\n\t\tlatitude, longitude = cal_PlateCarree_grid(tile, numCeil)\n\t\t\n\treturn latitude, longitude\n\t\n#-----------------------------------------------------------------------\ndef get_cord(cord, numCeil, proj):\n\t'''\n\tFunction to get the geographical coordinates of the given region for \n\ta given projection method\n\n\tParameters\n    ----------\n    \tcord : list or tuple\n\t\t\t   geographical coordinates \n\t\t\t   (Top Latitude, Bottom Latitude, Left Longitude, Right Longitude)\n\t\t\n\t\tnumCeil : number of pixels in one tile, default value 1200\n\t\tproj : projection method, Sinusoidal and PlateCarree available\n\t\t\n\tReturn\n    ----------\n\t\tmeshLat : array\n\t\t\t\t  latitude \n\t\tmeshLon : array\n\t\t\t\t  longitude \n\t\thidMin\t: int\n  \t\t\t\t  minimum horizontal tile index \n\t\thidMax\t: int\n  \t\t \t\t  maximum horizontal tile index \n\t\tvidMin  : int\n  \t\t \t\t  minimum vertical tile index \n\t\tvidMax  : int\n  \t\t \t\t  maximum vertical tile index\n\t'''\n\tif proj == 'Sinusoidal':\n\t\tmeshLat, meshLon, hidMin, hidMax, vidMin, vidMax = \\\n\t\tget_cord_Sinusoidal(cord, numCeil, debug = 0)\n\tif proj == 'PlateCarree':\n\t\tmeshLat, meshLon, hidMin, hidMax, vidMin, vidMax = \\\n\t\tget_cord_PlateCarree(cord, numCeil, debug = 0)\n\t\t\n\treturn meshLat, meshLon, hidMin, hidMax, vidMin, vidMax\n\t\n#-----------------------------------------------------------------------\ndef get_point_tile(cord, pos, proj):\n\t'''\n\tFunction to calculate the tile of a specific point\n\t\n\tParameters\n    ----------\n    \tcord : list or tuple\n\t\t\t   geographical coordinates (latitude, longitude)\n\t\t\n\t\tpos  : position of the point\n\t\tproj : projection method, Sinusoidal and PlateCarree available\n\t\t\n\tReturn\n    ----------\n    \thid  : int\n    \t\t   horizontal index of the point\n    \tvid  : int\n    \t\t   vertical index of the point\n    \ttile : str\n    \t\t   tile name\n\t'''\n\tif proj == 'Sinusoidal':\n\t\thid, vid, tile = get_point_tile_Sinusoidal(cord, pos)\n\tif proj == 'PlateCarree':\n\t\thid, vid, tile = get_point_tile_PlateCarree(cord, pos)\n\t\t\n\treturn hid, vid, tile\n\n
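#-----------------------------------------------------------------------\n# Example of the dispatch above, added for illustration. Function bodies are\n# resolved at call time, so the projection-specific helpers defined further\n# below are safe to reference here.\ndef demo_point_tile():\n\t# An upper-left corner at (40N, 105W) on the PlateCarree grid -> 'h07v05'\n\treturn get_point_tile((40, -105), 'UpperLeft', 'PlateCarree')\n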
\n#-----------------------------------------------------------------------\n# hereafter, functions related to each projection method\n'''\nFunctions for the Sinusoidal projection...\n'''\ndef cal_sinu_grid(tile, numCeil):\n\t'''\n\t\n\tFunction to calculate the geographical coordinates of the sinusoidal grid\n\t\n\tParameters\n    ----------\n\t\ttile - str format, example: 'h07v05'\n\t\tnumCeil - number of cells in one tile\n\t\n\tReturn\n    ---------- \n\t\tgeographical coordinates of the tile\n\t\n\tReference: 1. https://code.env.duke.edu/projects/mget/wiki/SinusoidalMODIS\n\t\t\t   2. https://onlinelibrary.wiley.com/doi/pdf/10.1111/0033-0124.00327\n\t'''\n\timport numpy as np\n\n\thalfCeilLen = 926.62543305/2.0\n\thalfHoriLenght = 20015109.354\n\thalfVertLenght = 10007554.677\t\n\tnumHoriTail = 37\n\tnumVertTail = 19\n\t\n\txx = np.linspace(-halfHoriLenght, halfHoriLenght, numHoriTail)\n\tyy = np.linspace(halfVertLenght, -halfVertLenght, numVertTail) \n\t\t\n\tvid = int(tile[4:6])\n\thid = int(tile[1:3])\n\tprint(' - Calculating geographical coordinates of ', tile)\n\tprint('   Vertical Tile:', vid, 'Horizontal Tile:', hid)\n\n\tx = np.linspace(xx[hid] + halfCeilLen, xx[hid+1] - halfCeilLen, numCeil)\n\ty = np.linspace(yy[vid] - halfCeilLen, yy[vid+1] + halfCeilLen, numCeil)\n\t\n\txv, yv = np.meshgrid(x, y)\n\t\n\tlatitude, longitude = sinu_to_geog((xv, yv))\n\t\n\treturn latitude, longitude\n\n#-----------------------------------------------------------------------\ndef sinu_to_geog(cord):\n\t'''\n\tFunction converting sinusoidal projection coordinates to geographical coordinates\n\t\n\tParameters\n    ----------\n\t\tsinusoidal point - list- or tuple-like, (x, y)\n\t\t\n\tReturn\n    ----------\n\t\tgeographical coordinates - latitude, longitude\n\n\t'''\n\timport numpy as np\n\tx = cord[0]\n\ty = cord[1]\n\tpi = 180.0 / np.pi\n\tR = 6371007.181000\n\t\n\tphi = y/R\n\tlamda = x / np.cos(phi) / R\n\t\n\tlatitude = phi * pi\n\tlongitude = lamda * pi\n\n\treturn latitude, longitude\n\n#-----------------------------------------------------------------------\ndef geog_to_sinu(cord):\n\t'''\n\tFunction converting geographical coordinates to sinusoidal projection coordinates\n\t\n\tParameters\n    ----------\n\t\tgeographical coordinates - list- or tuple-like, (latitude, longitude)\n\t\t\n\tReturn\n    ----------\n\t\tsinusoidal point - (x, y)\n\t\n\t\n\t'''\n\t\n\timport numpy as np\n\n\tlat = cord[0]\n\tlon = cord[1]\n\t\n\tpi = 180.0 / np.pi\n\tR = 6371007.181000\n\t\n\tphi = lat / pi\n\tlamda = lon / pi\n\ty = phi * R\n\tx = np.cos(phi) * lamda * R\n\t\n\treturn x, y\n\n
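#-----------------------------------------------------------------------\n# Round-trip sanity check for the two conversions above (illustrative example;\n# the two functions are exact analytic inverses up to floating-point error):\nif __name__ == '__main__':\n\tx, y = geog_to_sinu((40.0, -105.0))\n\tlat, lon = sinu_to_geog((x, y))\n\tassert abs(lat - 40.0) < 1e-9 and abs(lon + 105.0) < 1e-9\n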
int(hid) - 1\n\t\tif pos == 'UpperRight':\n\t\t\thid = int(hid) - 1\t\n\t\tif pos == 'LowerLeft':\n\t\t\thid = int(hid)\n\telse:\n\t\thid = int(hid)\t\n# \tprint(hid, vid)\t\t\t\t\t\n\tstrhid = str(hid)\n\twhile len(strhid) < 2:\n\t\tstrhid = '0' + strhid\n\n\t\t\n\tstrvid = str(vid)\n\twhile len(strvid) < 2:\n\t\tstrvid = '0' + strvid\n\t\n\ttile = 'h' + strhid + 'v' + strvid\n\n\treturn hid, vid, tile\t\n\n#-----------------------------------------------------------------------\ndef get_cord_Sinusoidal(cord, numCeil = 1200, debug = 0):\n\t'''\n\tFunction to get the geographical coordinates of the given region. \n\tThe coordinates are correspoding to Sinusodial grid.\n\t\n\tParameters\n ----------\n \tcord : list or tuple\n\t\t\t geographical coordinates \n\t\t\t (Top Latitude, Bottom Latitude, Left Longitude, Right Longitude)\n\t\t\n\t\tnumCeil : number of pixel in one tile, default value 1200\n\t\t\n\tReturn\n ----------\n\t\tmeshLat : array\n\t\t\t\t longitude \n\t\tmeshLon : array\n\t\t\t\t longitude \n\t\thidMin\t: int\n \t\t vetical index of the point \n\t\thidMax\t: int\n \t\t \t vetical index of the point \n\t\tvidMin : int\n \t\t \t vetical index of the point \n\t\tvidMax : int\n \t\t \t vetical index of the point\n\t'''\n\timport numpy as np\n\t\n\tUpperLeft = (cord[0], cord[2])\n\tUpperRight = (cord[0], cord[3])\n\tLowerLeft = (cord[1], cord[2])\n\tLowerRight = (cord[1], cord[3])\n\n\ttileInfor = []\n\n\ttileInfor.append(get_point_tile_Sinusoidal((UpperLeft), 'UpperLeft'))\n\ttileInfor.append(get_point_tile_Sinusoidal((UpperRight), 'UpperRight'))\n\ttileInfor.append(get_point_tile_Sinusoidal((LowerRight), 'LowerRight'))\n\ttileInfor.append(get_point_tile_Sinusoidal((LowerLeft), 'LowerLeft'))\n\n\thid = []\n\tvid = []\n\tfor item in tileInfor:\n\t\thid.append(item[0])\n\t\tvid.append(item[1])\n\t\n\thidMax = np.max(hid)\n\thidMin = np.min(hid)\n\n\tvidMax = np.max(vid)\n\tvidMin = np.min(vid)\n\n\tnum_h = hidMax - hidMin + 1\n\tnum_v = vidMax - vidMin + 1\n\n\t# update the hid and vid\n\thid = np.arange(hidMin, hidMax + 1, 1)\n\tvid = np.arange(vidMin, vidMax + 1, 1)\n\t\n\tGridDim = (num_v * numCeil, num_h * numCeil)\n\tmeshLat = np.full(GridDim, np.nan)\n\tmeshLon = np.full(GridDim, np.nan)\n\t\n\tif debug == 1:\n\t\tprint(hid)\n\t\tprint(vid)\n\t\n\tfor hh in hid:\n\t\tfor vv in vid:\n\t\t\t\n\t\t\tstrhid = str(hh)\n\t\t\twhile len(strhid) < 2:\n\t\t\t\tstrhid = '0' + strhid\n\n\t\t\tstrvid = str(vv)\n\t\t\twhile len(strvid) < 2:\n\t\t\t\tstrvid = '0' + strvid\n\t\n\t\t\ttile = 'h' + strhid + 'v' + strvid\n\t\t\t\n\t\t\tprint(tile)\n\t\t\tif debug == 1:\n\t\t\t\tprint('\\n',tile)\n\t\t\n\t\t\tlatitude, longitude = cal_sinu_grid(tile, numCeil)\n\t\t\tif debug == 1:\n\t\t\t\tprint(np.max(latitude), np.min(latitude))\n\t\t\thIdx = hh - hidMin \n\t\t\tvIdx = vv - vidMin\n\t\t\t\n\t\t\tif debug == 1:\n\t\t\t\tprint(hIdx, vIdx)\n\t\t\t\tprint(vIdx * numCeil, (vIdx + 1) * numCeil, hIdx * numCeil, (hIdx + 1) * numCeil)\n\t\t\n\t\t\tmeshLat[vIdx * numCeil : (vIdx + 1) * numCeil, \\\n\t\t\t\t\thIdx * numCeil : (hIdx + 1) * numCeil, ] = latitude\n\t\t\t\t\n\t\t\tmeshLon[vIdx * numCeil : (vIdx + 1) * numCeil, \\\n\t\t\t\t\thIdx * numCeil : (hIdx + 1) * numCeil, ] = longitude\t\n\treturn meshLat, meshLon, hidMin, hidMax, vidMin, vidMax\n\n#-----------------------------------------------------------------------\n'''\nFunction of PlateCarree...\n'''\ndef get_point_tile_PlateCarree(cord, pos):\n\t\n\tlat = cord[0]\n\tlon = cord[1]\n\n# \tprint(' - get_point_tile_VNP46A1', lat // 10, lon // 
10)\n\tvid = 8 - lat // 10\n\thid = 18 + lon // 10\n\t\n\tres_lat = lat%10\n\tres_lon = lon%10\n\t\n\tif res_lat == 0:\n\t\tif pos == 'UpperLeft':\n\t\t\tvid = int(vid) + 1\n\t\tif pos == 'LowerRight':\n\t\t\tvid = int(vid)\n\t\tif pos == 'UpperRight':\n\t\t\tvid = int(vid) + 1\n\t\tif pos == 'LowerLeft':\n\t\t\tvid = int(vid)\n\n\tif res_lon == 0:\n\t\tif pos == 'UpperLeft':\n\t\t\thid = int(hid)\n\t\tif pos == 'LowerRight':\n\t\t\thid = int(hid) - 1\n\t\tif pos == 'UpperRight':\n\t\t\thid = int(hid) - 1\t\n\t\tif pos == 'LowerLeft':\n\t\t\thid = int(hid)\n\t\n\tstrhid = str(hid)\n\twhile len(strhid) < 2:\n\t\tstrhid = '0' + strhid\n\n\tstrvid = str(vid)\n\twhile len(strvid) < 2:\n\t\tstrvid = '0' + strvid\n\t\t\n\ttile = 'h' + strhid + 'v' + strvid\n\n\treturn hid, vid, tile\n\n#-----------------------------------------------------------------------\ndef cal_PlateCarree_grid(tile, numCeil):\n\t\n\timport numpy as np\n\t\n\tvid = np.int(np.float(tile[4:6]))\n\thid = np.int(np.float(tile[1:3]))\n\tprint(' - Calulating geographical coordinates of ', tile)\n\tprint(' Vertical Tile:', vid, 'Horizontal Tile:', hid)\t\n\t\n\tlatBoundary = [(8 - vid) * 10, (9 - vid) * 10]\n\tlonBoundary = [(hid - 18) * 10, (hid - 17) * 10]\n\t\n\tlatitude = np.linspace(latBoundary[1], latBoundary[0], numCeil)\n\n\tlongitude = np.linspace(lonBoundary[0], lonBoundary[1], numCeil)\n\n\tlatitude = (latitude * np.ones((numCeil,1),np.float32)).T\n\t\n\tlongitude = np.ones((numCeil,1),np.float32) * longitude\n \n\treturn latitude, longitude\n\n#-----------------------------------------------------------------------\ndef get_cord_PlateCarree(cord, numCeil = 2400, debug = 0):\n\t'''\n\tFunction to get the geographical coordinates of the given region. \n\tThe coordinates are correspoding to Sinusodial grid.\n\t\n\tParameters\n ----------\n \tcord : list or tuple\n\t\t\t geographical coordinates \n\t\t\t (Top Latitude, Bottom Latitude, Left Longitude, Right Longitude)\n\t\t\n\t\tnumCeil : number of pixel in one tile, default value 1200\n\t\t\n\tReturn\n ----------\n\t\tmeshLat : array\n\t\t\t\t longitude \n\t\tmeshLon : array\n\t\t\t\t longitude \n\t\thidMin\t: int\n \t\t vetical index of the point \n\t\thidMax\t: int\n \t\t \t vetical index of the point \n\t\tvidMin : int\n \t\t \t vetical index of the point \n\t\tvidMax : int\n \t\t \t vetical index of the point\n\t'''\n\timport numpy as np\n\n\tUpperLeft = (cord[0], cord[2])\n\tUpperRight = (cord[0], cord[3])\n\tLowerLeft = (cord[1], cord[2])\n\tLowerRight = (cord[1], cord[3])\n\n\ttileInfor = []\n\n\t\t\n\ttileInfor.append(get_point_tile_PlateCarree(UpperLeft, 'UpperLeft'))\n\ttileInfor.append(get_point_tile_PlateCarree(UpperRight, 'UpperRight'))\n\ttileInfor.append(get_point_tile_PlateCarree(LowerRight, 'LowerRight'))\n\ttileInfor.append(get_point_tile_PlateCarree(LowerLeft, 'LowerLeft'))\n\n\thid = []\n\tvid = []\n\tfor item in tileInfor:\n\t\thid.append(item[0])\n\t\tvid.append(item[1])\n\t\n\thidMax = np.max(hid)\n\thidMin = np.min(hid)\n\n\tvidMax = np.max(vid)\n\tvidMin = np.min(vid)\n\t\n\t\n\tnum_h = hidMax - hidMin + 1\n\tnum_v = vidMax - vidMin + 1\n\n\t# update the hid and vid\n\thid = np.arange(hidMin, hidMax + 1, 1)\n\tvid = np.arange(vidMin, vidMax + 1, 1)\n\t\n\tGridDim = (num_v * numCeil, num_h * numCeil)\n\tmeshLat = np.full(GridDim, np.nan)\n\tmeshLon = np.full(GridDim, np.nan)\n\t\n\tif debug == 1:\n\t\tprint('\\n - get_cord_VNP46A1 - hid: ', hid)\n\t\tprint('\\n - get_cord_VNP46A1 - vid: ', vid)\n\t\n\n\tfor hh in hid:\n\t\tfor vv in 
vid:\t\n\t\t\tstrhid = str(hh)\n\t\t\twhile len(strhid) < 2:\n\t\t\t\n\t\t\t\tstrhid = '0' + strhid\n\n\t\t\tstrvid = str(vv)\n\t\t\twhile len(strvid) < 2:\n\t\t\t\tstrvid = '0' + strvid\n\t\n\t\t\ttile = 'h' + strhid + 'v' + strvid\n\t\n\t\t\tif debug == 1:\n\t\t\t\tprint('\\n - get_cord_VNP46A1 - tile: ',tile, hh, vv)\n\t\t\t\n\t\t\tlatitude, longitude = cal_PlateCarree_grid(tile, numCeil)\n\t\t\t\n\t\t\t\n\t\t\tif debug == 1:\n\t\t\t\tprint('\\n - get_cord_VNP46A1 - latitude: ',latitude[0,0], latitude[-1,0])\n\t\t\thIdx = hh - hidMin \n\t\t\tvIdx = vv - vidMin\n\t\t\t\n\t\t\tif debug == 1:\n\t\t\t\tprint(hIdx, vIdx)\n\t\t\t\tprint(vIdx * numCeil, (vIdx + 1) * numCeil, hIdx * numCeil, (hIdx + 1) * numCeil)\n\t\t\n\t\t\tmeshLat[vIdx * numCeil : (vIdx + 1) * numCeil, \\\n\t\t\t\t\thIdx * numCeil : (hIdx + 1) * numCeil, ] = latitude\n\t\t\t\t\n\t\t\tmeshLon[vIdx * numCeil : (vIdx + 1) * numCeil, \\\n\t\t\t\t\thIdx * numCeil : (hIdx + 1) * numCeil, ] = longitude\t\n\t\t\t\t\t\n\treturn meshLat, meshLon, hidMin, hidMax, vidMin, vidMax\n\t\n#-----------------------------------------------------------------------\ndef plateCarree_to_geog(cord):\n\t'''\n\tFunction of converting the Plate Carree to geographical projection\n\thttps://en.wikipedia.org/wiki/Equirectangular_projection\n\t\n\tParameters\n ----------\n\t\tsinusoidal point - list or tuple liked, (x, y)\n\t\t\n\tReturn\n ----------\n\t\tgeographical coordinates - latitude, longituede\n\t\n\t\n\t'''\n\timport numpy as np\n\tfrom numpy import sin, cos\n\tx = cord[0]\n\ty = cord[1]\n\tpi = 180.0 / np.pi\n\tR = 6371007.181000\n\t\n\tlamda_0 = 0.0\n\tphi_1 = 0.0\n\t\n\tlamda = x / R / cos(phi_1) + lamda_0\n\tphi\t = y / R + phi_1\n\n\tlat = phi * pi\n\tlon = lamda * pi\n\n\treturn lat, lon\n\n#-----------------------------------------------------------------------\ndef geog_to_plateCarree(cord):\n\t'''\n\tFunction of converting the geographical projection to Plate Carree\n\thttps://en.wikipedia.org/wiki/Equirectangular_projection\n\t\n\tParameters\n ----------\n\t\tgeographical coordinates - list or tuple liked, (latitude, longituede)\n\t\t\n\tReturn\n ----------\n\t\tequirectangular point - (x, y)\n\t\n\t\n\t'''\n\timport numpy as np\n\tfrom numpy import sin, cos\n\t\n\tpi = 180.0 / np.pi\n\tR = 6371007.181000\n\t\n\tlamda_0 = 0.0\n\tphi_1 = 0.0\n\t\n\tlat = cord[0]\n\tlon = cord[1]\n\t\n\tphi = lat / pi\n\tlamda = lon / pi\t\n\t\n\tx = R * (lamda - lamda_0) * cos(phi_1)\n\ty = R * (phi - phi_1)\n\t\n\treturn x, y\t\n\n#-----------------------------------------------------------------------\ndef get_tile_boundary_plateCarree(tile):\n\n\timport numpy as np\n\tvid = np.int(np.float(tile[4:6]))\n\thid = np.int(np.float(tile[1:3]))\n\tprint(' - Calulating geographical coordinates of ', tile)\n\tprint(' - Vertical Tile:', vid, 'Horizontal Tile:', hid)\t\n\t\n\t\n\tsouth = (8 - vid) * 10.0\n\tnorth = (9 - vid) * 10.0\n\twest = (hid - 18) * 10\n\teast = (hid - 17) * 10\n\n\treturn north, south, west, east\n\n#-----------------------------------------------------------------------\ndef get_tiles(cord):\n\n\timport numpy as np\n\t\n\thid_top, vid_top, _ = get_point_tile_PlateCarree((cord[0], cord[2]), pos = 'UpperLeft')\n\thid_bot, vid_bot, _ = get_point_tile_PlateCarree((cord[1], cord[3]), pos = 'LowerRight')\n\n# \tprint( hid_top, vid_top )\n# \tprint( hid_bot, vid_bot ) \n\n\thids = np.arange(hid_top, hid_bot + 1, 1)\n\tvids = np.arange(vid_top, vid_bot + 1, 1)\n\n\ttiles = []\n\tfor hid in hids:\n\t\tfor vid in vids:\n\t\t\tstrhid = 
str(hid)\n\t\t\twhile len(strhid) < 2:\n\t\t\t\tstrhid = '0' + strhid\n\n\t\t\tstrvid = str(vid)\n\t\t\twhile len(strvid) < 2:\n\t\t\t\tstrvid = '0' + strvid\n\t\t\n\t\t\ttile = 'h' + strhid + 'v' + strvid\n\t\t\ttiles.append(tile)\n\treturn tiles\n\t\n#-----------------------------------------------------------------------\t\ndef get_city_cords(cityname):\n\n\tcords = {}\n\tcords['Los Angeles'] = [35, 33, -119, -117]\n\tcords['Chicago'] = [42.5, 41.5, -88.5, -87]\n\tcords['Denver'] = [40.5, 39.5, -105.5, -104.5]\n\tcords['San Fracisco'] = []\n\tcords['New York'] = [42, 40, -73, -71]\n\tcords['Seattle'] = [48, 47, -123, -122]\n\tcords['Washington D.C.'] = []\n\tcords['Boston'] = []\n\tcords['Houston'] = [30.25, 29.25, -96, -94.8]\n\t\n\treturn cords[cityname]\n\n\n\n#-----------------------------------------------------------------------\t\ndef alber_equal_area(lon, lat, lat_0 = 40, lat_1 = 20, lat_2 = 60, lon_0 = -96):\n\t'''\n\tFunction to convert ... to alber equal area\n\t'''\n\t\n\timport numpy as np\n\tfrom numpy import cos, sin, log\n\n\tdef cal_alpha(phi, e):\n\n\t\tdef first_term(phi, e):\n\t\n\t\t\ttan_phi = sin(phi) / ( 1 - (e*sin(phi))**2 )\n\t\n\t\t\treturn tan_phi\n\n\t\tdef second_term(phi, e):\n\n\t\t\txx = ( 1 / (2 * e) ) * log( (1 - e * sin(phi) ) / ( 1 + e * sin(phi) ) )\n\t\n\t\t\treturn xx\n\n\t\talpha = (1 - e**2) * ( first_term(phi, e) - second_term(phi, e) )\n\n\t\treturn alpha\n\t\t\n\tpi = 180.0 / np.pi\n\tR = 6378137\n\t# flattening\n\tf = 1/298.257233\n\t# eccentricity\n\te = (2*f - f**2)**0.5\n\t\n\tlamda = lon / pi\n\t\n\tphi = lat / pi\n\n\tphi_0 = lat_0 / pi\n\t\n\tphi_1 = lat_1 / pi\n\t\n\tphi_2 = lat_2 / pi\n\t\n\tlamda_0 = lon_0 / pi\n\t\n\talpha = cal_alpha(phi, e)\n\t\n\talpha_0 = cal_alpha(phi_0, e)\n\t\n\talpha_1 = cal_alpha(phi_1, e)\n\t\n\talpha_2 = cal_alpha(phi_2, e)\n\t\n\tm1 = cos(phi_1) / ( 1 - (e * sin(phi_1))**2)**0.5\n\t\n\tm2 = cos(phi_2) / ( 1 - (e * sin(phi_2))**2)**0.5\n\t\n\tn = (m1**2 - m2**2) / (alpha_2 - alpha_1)\n\t\n\tC = m1**2 + n * alpha_1\n\t\n\ttheta = n * (lamda - lamda_0)\n\n\trho = R/n * (C - n*alpha)**0.5\n\t\n\trho_0 = R/n * (C - n*alpha_0)**0.5\n\n\tx = rho * sin(theta)\n\t\n\ty = rho_0 - rho*cos(theta)\n\t\n\treturn x, y\n\t\n","repo_name":"ywang37/pylib","sub_path":"convert_time_coordinate.py","file_name":"convert_time_coordinate.py","file_ext":"py","file_size_in_byte":22258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30167151240","text":"import unittest\nimport app\nimport json\nfrom pymongo import MongoClient\n\n\nclass DimensionsTestCase(unittest.TestCase):\n\n # runs before each test method\n def setUp(self):\n app.app.config['TESTING'] = True\n client = MongoClient(app.app.config['MONGODB_URL'])\n db = client.activitytracker\n event_collection = db.Events\n skill_collection = db.Skills\n dimension_collection = db.Dimensions\n user_collection = db.Users\n\n event_collection.remove({})\n skill_collection.remove({})\n dimension_collection.remove({})\n user_collection.remove({})\n\n user_collection.insert_one({\n 'firstname': 'Admin',\n 'lastname': 'Admin',\n 'email': 'admin@neu.edu',\n 'password': 'P@$$w0rD',\n 'token': 'ADMIN_TOKEN',\n 'tokenTTL': 1000,\n 'is_auth': True,\n 'events': [],\n 'roles': ['admin', 'faculty', 'superuser'],\n 'year': None,\n 'major': None,\n 'skills': [],\n 'dimensions': []\n })\n\n self.app = app.app.test_client()\n\n # tests getting all of the dimensions from the system\n def test_get_dimension(self):\n rv = 
self.app.post('/administrator/addDimension/ADMIN_TOKEN',\n data=json.dumps(dict(name=\"TestDimension\")),\n content_type='application/json')\n\n rv = self.app.get('/dimensions/getDimensions')\n assert \"TestDimension\" in rv.data\n obj = json.loads(rv.data)\n assert len(obj[\"data\"]) is 1","repo_name":"cjellis/GATSBackend","sub_path":"tests/test_dimensions.py","file_name":"test_dimensions.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10535173313","text":"#!/usr/bin/env python3\n\n##\n# HEADER\n\nimport socket\nimport nmap3\nimport sys\nsys.path.append(\"..\")\n\nfrom utils import *\n\n#\n##\n\nclass ScanMachine():\n\n def __init__(self, target, output=\"Screen\"):\n\n self.output = output\n self.target = target\n self.host = \"\"\n self.hostname = \"\"\n self.ports = []\n\n self.t1 = \"\"\n self.t2 = \"\"\n\n checkRoot(exitOnFail=False)\n\n if isup(self.target):\n\n print(f\"{self.target} is up\")\n\n else:\n\n print(f\"{self.target} is down, exiting ..\")\n exit()\n\n self.validTarget()\n\n self.t1 = time.time()\n\n self.scanPort()\n\n self.t2 = truncate(time.time() - self.t1)\n\n self.outPut()\n\n def getoutPut(self):\n\n return self.report\n\n def outPut(self):\n if self.output == \"None\":\n\n pass\n\n elif self.output == \"Screen\":\n\n print(\"PORT\\t\\tSTATE\\t\\tSERVICE\\t\\tVERSION\\n\")\n\n for x in range(len(self.ports)):\n\n if len(self.ports[x]) == 5:\n\n print(f'{self.ports[x][\"portid\"]}/{self.ports[x][\"protocol\"]} \\t{self.ports[x][\"state\"]} \\t{self.ports[x][\"service\"]} \\t{self.ports[x][\"version\"]}')\n\n else:\n\n print(f'{self.ports[x][\"portid\"]}/{self.ports[x][\"protocol\"]} \\t{self.ports[x][\"state\"]} \\t{self.ports[x][\"service\"]}')\n\n print(f'\\nHost ({self.target}) scanned in {self.t2} sec')\n\n elif self.output == \"console\":\n\n self.report = dict()\n\n self.report[\"ip\"] = self.host\n self.report[\"hostname\"] = self.hostname\n self.report[\"ports\"] = self.ports\n\n elif self.output == \"File\":\n\n with open(\"tmp\", \"w\") as f:\n\n print(f'''\\\n\"ip\" : \"{self.host}\"\n\"hostname\" : \"{self.hostname}\"\n\"ports\" : [\\\n''', file=f)\n\n count = 0\n\n with open(\"tmp\", \"a\") as f:\n\n for x in range(len(self.ports)):\n\n count += 1\n\n if len(self.ports[x]) == 5:\n\n if count < len(self.ports):\n\n print(f'''\\\n {{\n \"portid\" : \"{self.ports[x][\"portid\"]}\",\n \"protocol\" : \"{self.ports[x][\"protocol\"]}\",\n \"state\" : \"{self.ports[x][\"state\"]}\",\n \"service\" : \"{self.ports[x][\"service\"]}\",\n \"version\" : \"{self.ports[x][\"version\"]}\"\n }},\\\n''', file=f)\n else:\n print(f'''\\\n {{\n \"portid\" : \"{self.ports[x][\"portid\"]}\",\n \"protocol\" : \"{self.ports[x][\"protocol\"]}\",\n \"state\" : \"{self.ports[x][\"state\"]}\",\n \"service\" : \"{self.ports[x][\"service\"]}\",\n \"version\" : \"{self.ports[x][\"version\"]}\"\n }}\\\n''', file=f)\n\n else:\n\n if count < len(self.ports):\n\n print(f'''\\\n {{\n \"portid\" : \"{self.ports[x][\"portid\"]}\",\n \"protocol\" : \"{self.ports[x][\"protocol\"]}\",\n \"state\" : \"{self.ports[x][\"state\"]}\",\n \"service\" : \"{self.ports[x][\"service\"]}\"\n }},\\\n''', file=f)\n else:\n\n print(f'''\\\n {{\n \"portid\" : \"{self.ports[x][\"portid\"]}\",\n \"protocol\" : \"{self.ports[x][\"protocol\"]}\",\n \"state\" : \"{self.ports[x][\"state\"]}\",\n \"service\" : \"{self.ports[x][\"service\"]}\"\n }}\\\n''', file=f)\n\n print(\"]\", file=f)\n\n def 
validTarget(self):\n\n try:\n\n socket.inet_aton(self.target)\n self.host = self.target\n hostname = socket.gethostbyaddr(self.target)\n self.hostname = hostname[0]\n\n except socket.error:\n\n self.hostname = self.target\n try:\n\n self.host = socket.gethostbyname(self.target)\n\n except socket.gaierror:\n\n print(f\"Cannot get ip from {self.target}, exiting.\")\n exit()\n\n def scanPort(self):\n\n port_s = nmap3.Nmap()\n port_o = port_s.nmap_version_detection(self.host)\n\n for x in range(len(port_o[self.host][\"ports\"])):\n OPEN_PORT = {\n \"protocol\" : \"\",\n \"portid\" : \"\",\n \"state\" : \"\",\n \"service\" : \"\"\n }\n\n OPEN_PORT[\"protocol\"] = port_o[self.host][\"ports\"][x][\"protocol\"]\n OPEN_PORT[\"portid\"] = port_o[self.host][\"ports\"][x][\"portid\"]\n OPEN_PORT[\"state\"] = port_o[self.host][\"ports\"][x][\"state\"]\n OPEN_PORT[\"service\"] = port_o[self.host][\"ports\"][x][\"service\"][\"name\"]\n\n if \"product\" in port_o[self.host][\"ports\"][x][\"service\"]:\n\n OPEN_PORT[\"version\"] = port_o[self.host][\"ports\"][x][\"service\"][\"product\"]\n\n if \"version\" in port_o[self.host][\"ports\"][x][\"service\"]:\n\n OPEN_PORT[\"version\"] = OPEN_PORT[\"version\"] + port_o[self.host][\"ports\"][x][\"service\"][\"version\"]\n\n self.ports.append(OPEN_PORT)\n\nif __name__ == \"__main__\":\n ScanMachine(\"scanme.nmap.org\", output=\"console\")\n","repo_name":"Pixailz/PyMod","sub_path":"modules/scan_port.py","file_name":"scan_port.py","file_ext":"py","file_size_in_byte":5074,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"3734732422","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n#=============================================================================\n# ProjectName: seekplum\n# FileName: hello_world\n# Desc: 官方示例代码\n# Author: seekplum\n# Email: 1131909224m@sina.cn\n# HomePage: seekplum.github.io\n# Create: 2018-12-22 17:23\n#=============================================================================\n\"\"\"\nfrom __future__ import print_function\n\nimport time\nimport threading\n\nfrom tornado.ioloop import IOLoop\n\nfrom tornado.web import Application, RequestHandler\n\nfrom tornado import gen\nfrom concurrent.futures import ThreadPoolExecutor\n\nimport redis_utils\n\n\nclass Result(object):\n def __init__(self):\n self._value = None\n self._evt = threading.Event()\n # self.__running = False\n\n def set_result(self, value):\n self._value = value\n self._evt.set()\n # self.__running = True\n\n def result(self):\n self._evt.wait()\n # while not self.__running:\n # pass\n return self._value\n\n\nclass Singleton(object):\n _instance = None\n\n def __new__(cls, *args, **kwargs):\n if not cls._instance:\n cls._instance = super(Singleton, cls).__new__(cls, *args, **kwargs)\n\n return cls._instance\n\n\nclass Scheduler(Singleton):\n def __init__(self):\n self._result_queue = set()\n\n def add_result(self, r):\n self._result_queue.add(r)\n print(\"length: \", len(self._result_queue))\n\n def set_result(self, value):\n while self._result_queue:\n r = self._result_queue.pop()\n r.set_result(value)\n\n\ndef get_message(sleep_time):\n time.sleep(sleep_time)\n msg = \"Hello, world \" + \"\\n\"\n return msg\n\n\nclass MainHandler(RequestHandler):\n _executor = ThreadPoolExecutor(30)\n _scheduler = Scheduler()\n\n def do(self):\n r_client = redis_utils.redis_connect(\"127.0.01\", 6379)\n with redis_utils.redis_lock(\"hello\", r_client) as is_lock:\n if is_lock:\n print(\"start task...\")\n msg = get_message(3)\n 
print(\"end task\")\n self._scheduler.set_result(msg)\n else:\n print(\"task is running\")\n\n @gen.coroutine\n def get(self):\n r = Result()\n self._scheduler.add_result(r)\n yield self._executor.submit(self.do)\n data = r.result()\n self.write(data)\n\n\ndef make_app():\n return Application([\n (r\"/\", MainHandler),\n ])\n\n\nif __name__ == \"__main__\":\n app = make_app()\n app.listen(12345)\n print(\"http://127.0.0.1:12345/\")\n IOLoop.current().start()\n","repo_name":"seekplum/seekplum","sub_path":"tornado_source/hello_world.py","file_name":"hello_world.py","file_ext":"py","file_size_in_byte":2638,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"20608706893","text":"'''\nDesafio 010\nFaça um programa que leia quanto dinheiro\numa pessoa tem na carteira e mostre quantos \ndolares ela pode comprar\n\nconsidere $ 1.00 = R$ 5.108\n\n'''\ncustoDolar = 5.108\ncarteira = float(input('Digite o valor de sua carteira '))\nprint('Com R${:.2f} você pode comprar um total de ${:.2f} dolares'.format(carteira, carteira/custoDolar))\n","repo_name":"WinterDP/Python_listaExercicios","sub_path":"desafio010.py","file_name":"desafio010.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25783441069","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nTheMoviePredictor script\nAuthor: Arnaud de Mouhy \nAuthor: Guillaume Meurisse \n\"\"\"\n\n# import mysql.connector\n# import sys\nimport argparse\nimport csv\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\nfrom datetime import datetime, timedelta\nimport locale\nfrom dotenv import load_dotenv\n\nfrom movie import Movie\nfrom person import Person\nfrom omdb import Omdb\nfrom tmdb import Tmdb\nfrom moviemanager import MovieManager\n\nlocale.setlocale(locale.LC_ALL, 'fr_FR.utf8')\nload_dotenv()\n\n\ndef find_text_in_tag(tag_name, string):\n return lambda tag: tag.name == tag_name and string in tag.get_text()\n\n\ndef scrapWikiPage(page):\n soup = BeautifulSoup(page.content, 'html.parser')\n infos = {}\n infobox = soup.find('div', class_=re.compile('infobox'))\n entete = infobox.find('div', class_='entete')\n infos['title'] = entete.find('cite').get_text()\n section_ul = soup.find(id='Fiche_technique').parent.find_next_sibling('ul')\n item = section_ul.find(find_text_in_tag('li', 'Titre original'))\n if item:\n infos['original_title'] = item.find('i').get_text()\n item = section_ul.find(find_text_in_tag('li', 'Pays d\\'origine'))\n if item:\n infos['origin_country'] = item.find('span').get_text().lstrip(' ')\n item = section_ul.find(find_text_in_tag('li', 'Durée'))\n if item:\n infos['duration'] = re.findall('(\\d+)', item.get_text())[0]\n item = section_ul.find(find_text_in_tag('li', 'Dates de sortie'))\n if item:\n infos['release_date'] = {}\n release_dates = re.findall(r'(?:(\\w[^:\\n]*)\\s:\\s)?(\\d+)\\s(\\w+)\\s(\\d+)',\n item.get_text())\n for release_date in release_dates:\n if release_date[0] != '':\n country = release_date[0]\n year = release_date[1]\n month = datetime.strptime(release_date[2], \"%B\").strftime('%m')\n day = release_date[3]\n infos['release_date'][country] = '-'.join((year, month, day))\n item = section_ul.find(find_text_in_tag('li', 'Classification'))\n if item:\n rating_str = re.findall(r'France\\s:\\s([^\\n]*)', item.get_text())\n if rating_str:\n if rating_str[0].find('12') != -1:\n infos['rating'] = '-12'\n return infos\n\n\ndef 
scrapWikiInfobox(page):\n soup = BeautifulSoup(page.content, 'html.parser')\n infos = {}\n infobox = soup.find('div', class_='infobox_v3')\n infos['title'] = infobox.find('div', class_='entete').find('cite').get_text()\n keys = soup.find('div', class_='infobox_v3').find('tbody').find_all('th')\n values = soup.find('div', class_='infobox_v3').find('tbody').find_all('td')\n\n for row, raw_key in enumerate(keys):\n key = raw_key.get_text()\n if key == 'Titre original':\n infos['original_title'] = values[row].get_text().lstrip('\\n')\n elif key == 'Sortie':\n infos['release_date'] = values[row].get_text().lstrip('\\n')\n elif key == 'Durée':\n infos['duration'] = values[row].get_text().lstrip('\\n').strip('\\xa0minutes')\n elif key == 'Acteurs principaux':\n actors = values[row].find_all('a')\n infos['cast'] = []\n for actor in actors:\n infos['cast'].append(actor.get_text())\n else:\n entries = values[row].findAll(['a', 'i'])\n if len(entries) > 1:\n infos[key] = []\n for entry in entries:\n if entry != '':\n infos[key].append(entry.get_text())\n else:\n infos[key] = values[row].get_text().lstrip('\\n')\n return infos\n\n\ndef scrapWikiGeneric(page):\n soup = BeautifulSoup(page.content, 'html.parser')\n infos = {}\n section_ul = soup.find(id='Fiche_technique').parent.find_next_sibling('ul')\n for item in section_ul.findChildren('li'):\n match = re.match(r'(.*?)\\s?:', item.getText())\n if match:\n key = match.group(1)\n elements = item.findAll(['a', 'i'])\n if len(elements) == 1:\n infos[key] = elements[0].getText()\n elif len(elements) >= 2:\n infos[key] = []\n for element in elements:\n infos[key].append(element.getText())\n else:\n info = re.findall(r':\\s(.*)', item.getText())\n if info:\n infos[key] = info[0]\n return infos\n\n\npre_parser = argparse.ArgumentParser(add_help=False)\npre_parser.add_argument('context', choices=['people', 'movies'], nargs='?')\ncontext = pre_parser.parse_known_args()[0].context\n\nparser = argparse.ArgumentParser(description='Process MoviePredictor data')\nparser.add_argument('context', choices=['people', 'movies'],\n help='le contexte dans lequel nous allons travailler')\n\naction_subparser = parser.add_subparsers(title='action', dest='action')\n\nlist_parser = action_subparser.add_parser('list',\n help='liste les entitées du contexte')\nlist_parser.add_argument('--export', help='chemin du fichier exporté')\n\nfind_parser = action_subparser.add_parser('find',\n help='trouve une entité selon un paramètre')\nfind_parser.add_argument('id', help='identifant à  rechercher')\ninsert_parser = action_subparser.add_parser('insert',\n help='insère une nouvelle entité')\n\nif context == \"people\":\n insert_parser.add_argument('--firstname',\n help='prénom de l\\'entité à insérer',\n required=True)\n insert_parser.add_argument('--lastname',\n help='nom de famille de l\\'entité à insérer',\n required=True)\nelif context == \"movies\":\n insert_parser.add_argument('--title',\n help='titre du film à insérer',\n required=True)\n insert_parser.add_argument('--duration', type=int,\n help='durée du film à insérer', required=True)\n insert_parser.add_argument('--original-title',\n help='titre original du film à insérer',\n required=True)\n insert_parser.add_argument('--rating', choices=[\"TP\", \"-12\", \"-16\", \"-18\"],\n help='catégorie d\\'age du film à insérer',\n required=True)\n insert_parser.add_argument('--release-date', metavar='YYYY-MM-DD',\n help='date de sortir du film à insérer',\n required=True)\n\n import_parser = action_subparser.add_parser(\n 'import',\n 
help='importe des entités à partir d\\'un fichier'\n )\n import_parser.add_argument('--file', metavar='file.csv',\n help='fichier d\\'où importer les entitées')\n import_parser.add_argument('--api', choices=['omdb', 'tmdb'], help='TODO')\n import_parser.add_argument('--imdbId', metavar='tt123456', help='TODO')\n import_parser.add_argument('--new-movies', metavar='',\n type=int, help='TODO')\n\n movie_parser = action_subparser.add_parser(\n 'scrap',\n help='importe des entités à partir d\\'une page Wikipedia')\n movie_parser.add_argument('url', help='page Wikipedia d\\'un film')\n\nargs = parser.parse_args()\n\n\nmov_man = MovieManager('127.0.0.1',\n 'predictor',\n 'predictor',\n 'predictor')\n\nif args.context == \"people\":\n if args.action == \"list\":\n people = mov_man.findall(\"people\")\n if args.export:\n with open(args.export, 'w', encoding='utf-8', newline='\\n') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(people[0].__dict__.keys())\n for person in people:\n writer.writerow(person.__dict__.values())\n else:\n for person in people:\n print(person)\n if args.action == \"find\":\n peopleId = args.id\n person = mov_man.find_person(peopleId)\n print(person)\n if args.action == \"insert\":\n id = mov_man.insertPerson(Person(args.firstname, args.lastname))\n print(\"New person added with id: \"+str(id))\n\nif args.context == \"movies\":\n if args.action == \"list\": \n movies = mov_man.findall(\"movies\")\n for movie in movies:\n print(movie)\n if args.action == \"find\": \n movieId = args.id\n movie = mov_man.find_movie(movieId)\n print(movie)\n if args.action == \"insert\":\n movie = Movie(args.title, args.original_title, args.duration, args.rating, args.release_date)\n id = mov_man.insertMovie(movie)\n print(\"New movie added with id: \"+str(id))\n if args.action == \"import\":\n if args.file:\n with open(args.file, 'r', newline='\\n', encoding='utf-8') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n movie = Movie(row['title'], row['original_title'],\n row['duration'], row['rating'],\n row['release_date'])\n mov_man.insertMovie(movie)\n # insertMovieDict(row)\n if args.api == 'omdb':\n api = Omdb()\n movie = api.get_imdb_movie(args.imdbId)\n mov_man.insertMovie(movie)\n elif args.api == 'tmdb':\n api = Tmdb()\n movie = api.get_imdb_movie(args.imdbId)\n mov_man.insertMovie(movie)\n elif args.new_movies:\n api = Tmdb()\n now = datetime.today()\n last_week = now - timedelta(days=args.new_movies)\n movies = api.get_movies_by_dates(from_date=str(last_week.date()),\n to_date=str(now.date()))\n for movie in movies:\n if movie.imdb_id is None:\n continue\n movie_id = mov_man.insertMovie(movie)\n movie_id = mov_man.find_movie_id(movie.imdb_id)\n crew = api.get_credits(movie.imdb_id)\n role_ids = range(1, 6)\n roles = ['actors','directors','producers','writers','editors']\n for role, role_id in zip(roles, role_ids):\n people = crew[role]\n for person in people:\n print(person)\n person_id = mov_man.insertPerson(person)\n person_id = mov_man.find_person_id(person.imdb_id)\n mov_man.insertCredit(movie_id, person_id, role_id)\n \n\n if args.action == 'scrap':\n page = requests.get(args.url)\n print(scrapWikiPage(page))\n print(scrapWikiInfobox(page))\n print(scrapWikiGeneric(page))\n # 
print(str(scrapWikiGeneric(page)).encode('utf-8'))\n","repo_name":"Simplon-IA-Bdx-1/the-movie-predictor-guitoo","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":11147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22224818545","text":"import random\nimport time\nimport sys\nfrom game import constants\nfrom game.action import Action\nfrom game.point import Point\nfrom game.actor import Actor\n\nclass HandleBoxCollisions(Action):\n \"\"\"A code template for handling collisions. The responsibility of this class of objects is to update the game state when actors collide.\n \n Stereotype:\n Controller\n \"\"\"\n def __init__(self, physics_service):\n super().__init__()\n self._physics_service = physics_service\n self._key_value = ''\n self._x = 0 \n self._y = constants.MAX_Y - 20\n \n def set_key_value(self, key):\n self._key_value = key\n\n def get_key_value(self):\n return self._key_value \n\n def set_x(self, x):\n self._x = x\n \n def set_y(self, y):\n self._y = y\n\n def execute(self, cast):\n \"\"\"Executes the action using the given actors.\n\n Args:\n cast (dict): The game actors {key: tag, value: list}.\n \"\"\"\n marquee = cast[\"marquee\"][0] # there's only one\n hero = cast[\"hero\"][0] # there's only one\n boxes = cast[\"boxes\"]\n locked_door = cast[\"locked_door\"]\n marquee.set_text(self._key_value)\n hero_spot = hero.get_position()\n hero_y = hero_spot.get_y()\n for box in boxes:\n if self._physics_service.is_collision(hero, box):\n description = box.get_description()\n marquee.set_text(description)\n if marquee.get_text() == 'KEY FOUND':\n self.set_key_value('KEY FOUND') \n ","repo_name":"ethancmeeker/Final_Game_The_Seeker","sub_path":"game/handle_box_collisions.py","file_name":"handle_box_collisions.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25854373612","text":"import os\nimport pandas as pd\n\nfrom dotenv import load_dotenv\nfrom tagoio_sdk import Analysis, Device\nfrom joblib import load\n\nload_dotenv()\n\nclassifier = load('model.joblib')\n\ndevice = Device({\n 'token': os.environ['DEVICE_TOKEN']\n})\n\n\ndef analysis(context, scope: list) -> None:\n variables = device.getData({\n 'query': 'last_item',\n 'variables': ['x', 'y', 'z', '010000024033', '010000030096', '020000032221', '020000033111']\n })\n\n data = {}\n for variable in variables:\n data[variable['variable']] = [variable['value']]\n\n X = pd.DataFrame.from_dict(data)\n\n y = classifier.predict(X)\n if y[0] == 1.:\n print(\"Fall detected! 
Sending message to contact\")\n\n device.sendData({\n 'variable': 'anomaly',\n 'value': y[0]\n })\n\n\nAnalysis({\"token\": os.environ['ANALYSIS_TOKEN']}).init(analysis)\n","repo_name":"matheus3301/TI0162-InternetDasCoisas-UFC-2023.1","sub_path":"demo/worker/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71272395626","text":"import asyncio\nimport discord\nfrom discord.ext import commands\nimport time\nimport datetime\n\n\nclass TimeRel(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command()\n @commands.cooldown(1, 5, commands.BucketType.user)\n async def timer(self, ctx, time):\n if str(time).endswith('s'):\n timeinsec = int(str(time[:-1]))\n elif str(time).endswith('m'):\n timeinsec = int(str(time[:-1])) * 60\n elif str(time).endswith('h'):\n timeinsec = int(str(time[:-1])) * 60 * 60\n elif str(time).endswith('d'):\n timeinsec = int(str(time[:-1])) * 60 * 60 * 24\n elif str(time).endswith('w'):\n timeinsec = int(str(time[:-1])) * 60 * 60 * 24 * 7\n else:\n await ctx.send(\"Check the values you sent me again!\")\n return\n\n if timeinsec > 604800:\n await ctx.send(\"Wanna break me?! Limit is 1 week!!\")\n return\n themsg = await ctx.send(f\"{timeinsec}s..\")\n for _ in range(timeinsec):\n timeinsec -= 1\n if timeinsec % 5 == 0:\n await themsg.edit(content=f\"{timeinsec}s..\")\n await asyncio.sleep(1)\n await themsg.delete()\n await ctx.send(f'{ctx.author.mention}, Timer is Up!')\n\n @timer.error\n async def timer_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(\"Tell me how long the timer should last in seconds too!\")\n return\n if isinstance(error, commands.BadArgument):\n await ctx.send(\"Umm. 
Check that time you sent me again dude.\")\n return\n\n # Uptime Command\n @commands.command()\n @commands.cooldown(1, 5, commands.BucketType.user)\n async def uptime(self, ctx):\n current_time = time.time()\n difference = int(round(current_time - self.bot._start_time))\n text = str(datetime.timedelta(seconds=difference))\n embed = discord.Embed(colour=0x00ff00)\n embed.add_field(name=\"Uptime\", value=text)\n embed.set_footer(\n text=f\"{self.bot.user.name}\",\n icon_url=f\"{self.bot.user.avatar_url}\"\n )\n try:\n await ctx.send(embed=embed)\n except discord.HTTPException:\n await ctx.send(\"Current uptime: \" + text)\n\n\ndef setup(bot):\n bot.add_cog(TimeRel(bot))\n","repo_name":"KrishGarg/Discord-Bot","sub_path":"src/cogs/TimeRel.py","file_name":"TimeRel.py","file_ext":"py","file_size_in_byte":2394,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"8397640291","text":"import re\n\nfrom pygments.lexer import RegexLexer, bygroups, default, include, using, words\nfrom pygments.token import Comment, Keyword, Name, Number, Operator, Punctuation, \\\n String, Text\nfrom pygments.lexers._csound_builtins import OPCODES\nfrom pygments.lexers.html import HtmlLexer\nfrom pygments.lexers.python import PythonLexer\nfrom pygments.lexers.scripting import LuaLexer\n\n__all__ = ['CsoundScoreLexer', 'CsoundOrchestraLexer', 'CsoundDocumentLexer']\n\nnewline = (r'((?:(?:;|//).*)*)(\\n)', bygroups(Comment.Single, Text))\n\n\nclass CsoundLexer(RegexLexer):\n # Subclasses must define a 'single-line string' state.\n tokens = {\n 'whitespace': [\n (r'[ \\t]+', Text),\n (r'\\\\\\n', Text),\n (r'/[*](.|\\n)*?[*]/', Comment.Multiline)\n ],\n\n 'macro call': [\n (r'(\\$\\w+\\.?)(\\()', bygroups(Comment.Preproc, Punctuation),\n 'function macro call'),\n (r'\\$\\w+(\\.|\\b)', Comment.Preproc)\n ],\n 'function macro call': [\n (r\"((?:\\\\['\\)]|[^'\\)])+)(')\", bygroups(Comment.Preproc, Punctuation)),\n (r\"([^'\\)]+)(\\))\", bygroups(Comment.Preproc, Punctuation), '#pop')\n ],\n\n 'whitespace or macro call': [\n include('whitespace'),\n include('macro call')\n ],\n\n 'preprocessor directives': [\n (r'#(e(nd(if)?|lse)|ifn?def|undef)\\b|##', Comment.Preproc),\n (r'#include\\b', Comment.Preproc, 'include'),\n (r'#[ \\t]*define\\b', Comment.Preproc, 'macro name'),\n (r'@+[ \\t]*\\d*', Comment.Preproc)\n ],\n\n 'include': [\n include('whitespace'),\n (r'\"', String, 'single-line string')\n ],\n\n 'macro name': [\n include('whitespace'),\n (r'(\\w+)(\\()', bygroups(Comment.Preproc, Text),\n 'function macro argument list'),\n (r'\\w+', Comment.Preproc, 'object macro definition after name')\n ],\n 'object macro definition after name': [\n include('whitespace'),\n (r'#', Punctuation, 'object macro replacement text')\n ],\n 'object macro replacement text': [\n (r'(\\\\#|[^#])+', Comment.Preproc),\n (r'#', Punctuation, '#pop:3')\n ],\n 'function macro argument list': [\n (r\"(\\w+)(['#])\", bygroups(Comment.Preproc, Punctuation)),\n (r'(\\w+)(\\))', bygroups(Comment.Preproc, Punctuation),\n 'function macro definition after name')\n ],\n 'function macro definition after name': [\n (r'[ \\t]+', Text),\n (r'#', Punctuation, 'function macro replacement text')\n ],\n 'function macro replacement text': [\n (r'(\\\\#|[^#])+', Comment.Preproc),\n (r'#', Punctuation, '#pop:4')\n ]\n }\n\n\nclass CsoundScoreLexer(CsoundLexer):\n \"\"\"\n For `Csound `_ scores.\n\n .. 
versionadded:: 2.1\n \"\"\"\n\n name = 'Csound Score'\n aliases = ['csound-score', 'csound-sco']\n filenames = ['*.sco']\n\n tokens = {\n 'partial statement': [\n include('preprocessor directives'),\n (r'\\d+e[+-]?\\d+|(\\d+\\.\\d*|\\d*\\.\\d+)(e[+-]?\\d+)?', Number.Float),\n (r'0[xX][a-fA-F0-9]+', Number.Hex),\n (r'\\d+', Number.Integer),\n (r'\"', String, 'single-line string'),\n (r'[+\\-*/%^!=<>|&#~.]', Operator),\n (r'[]()[]', Punctuation),\n (r'\\w+', Comment.Preproc)\n ],\n\n 'statement': [\n include('whitespace or macro call'),\n newline + ('#pop',),\n include('partial statement')\n ],\n\n 'root': [\n newline,\n include('whitespace or macro call'),\n (r'[{}]', Punctuation, 'statement'),\n (r'[abefimq-tv-z]|[nN][pP]?', Keyword, 'statement')\n ],\n\n 'single-line string': [\n (r'\"', String, '#pop'),\n (r'[^\\\\\"]+', String)\n ]\n }\n\n\nclass CsoundOrchestraLexer(CsoundLexer):\n \"\"\"\n For `Csound `_ orchestras.\n\n .. versionadded:: 2.1\n \"\"\"\n\n name = 'Csound Orchestra'\n aliases = ['csound', 'csound-orc']\n filenames = ['*.orc']\n\n user_defined_opcodes = set()\n\n def opcode_name_callback(lexer, match):\n opcode = match.group(0)\n lexer.user_defined_opcodes.add(opcode)\n yield match.start(), Name.Function, opcode\n\n def name_callback(lexer, match):\n name = match.group(0)\n if re.match('p\\d+$', name) or name in OPCODES:\n yield match.start(), Name.Builtin, name\n elif name in lexer.user_defined_opcodes:\n yield match.start(), Name.Function, name\n else:\n nameMatch = re.search(r'^(g?[aikSw])(\\w+)', name)\n if nameMatch:\n yield nameMatch.start(1), Keyword.Type, nameMatch.group(1)\n yield nameMatch.start(2), Name, nameMatch.group(2)\n else:\n yield match.start(), Name, name\n\n tokens = {\n 'label': [\n (r'\\b(\\w+)(:)', bygroups(Name.Label, Punctuation))\n ],\n\n 'partial expression': [\n include('preprocessor directives'),\n (r'\\b(0dbfs|k(r|smps)|nchnls(_i)?|sr)\\b', Name.Variable.Global),\n (r'\\d+e[+-]?\\d+|(\\d+\\.\\d*|\\d*\\.\\d+)(e[+-]?\\d+)?', Number.Float),\n (r'0[xX][a-fA-F0-9]+', Number.Hex),\n (r'\\d+', Number.Integer),\n (r'\"', String, 'single-line string'),\n (r'\\{\\{', String, 'multi-line string'),\n (r'[+\\-*/%^!=&|<>#~¬]', Operator),\n (r'[](),?:[]', Punctuation),\n (words((\n # Keywords\n 'do', 'else', 'elseif', 'endif', 'enduntil', 'fi', 'if', 'ithen', 'kthen',\n 'od', 'then', 'until', 'while',\n # Opcodes that act as control structures\n 'return', 'timout'\n ), prefix=r'\\b', suffix=r'\\b'), Keyword),\n (words(('goto', 'igoto', 'kgoto', 'rigoto', 'tigoto'),\n prefix=r'\\b', suffix=r'\\b'), Keyword, 'goto label'),\n (words(('cggoto', 'cigoto', 'cingoto', 'ckgoto', 'cngoto'),\n prefix=r'\\b', suffix=r'\\b'), Keyword,\n ('goto label', 'goto expression')),\n (words(('loop_ge', 'loop_gt', 'loop_le', 'loop_lt'),\n prefix=r'\\b', suffix=r'\\b'), Keyword,\n ('goto label', 'goto expression', 'goto expression', 'goto expression')),\n (r'\\bscoreline(_i)?\\b', Name.Builtin, 'scoreline opcode'),\n (r'\\bpyl?run[it]?\\b', Name.Builtin, 'python opcode'),\n (r'\\blua_(exec|opdef)\\b', Name.Builtin, 'lua opcode'),\n (r'\\b[a-zA-Z_]\\w*\\b', name_callback)\n ],\n\n 'expression': [\n include('whitespace or macro call'),\n newline + ('#pop',),\n include('partial expression')\n ],\n\n 'root': [\n newline,\n include('whitespace or macro call'),\n (r'\\binstr\\b', Keyword, ('instrument block', 'instrument name list')),\n (r'\\bopcode\\b', Keyword, ('opcode block', 'opcode parameter list',\n 'opcode types', 'opcode types', 'opcode name')),\n include('label'),\n 
default('expression')\n ],\n\n 'instrument name list': [\n include('whitespace or macro call'),\n (r'\\d+|\\+?[a-zA-Z_]\\w*', Name.Function),\n (r',', Punctuation),\n newline + ('#pop',)\n ],\n 'instrument block': [\n newline,\n include('whitespace or macro call'),\n (r'\\bendin\\b', Keyword, '#pop'),\n include('label'),\n default('expression')\n ],\n\n 'opcode name': [\n include('whitespace or macro call'),\n (r'[a-zA-Z_]\\w*', opcode_name_callback, '#pop')\n ],\n 'opcode types': [\n include('whitespace or macro call'),\n (r'0|[]afijkKoOpPStV[]+', Keyword.Type, '#pop'),\n (r',', Punctuation)\n ],\n 'opcode parameter list': [\n include('whitespace or macro call'),\n newline + ('#pop',)\n ],\n 'opcode block': [\n newline,\n include('whitespace or macro call'),\n (r'\\bendop\\b', Keyword, '#pop'),\n include('label'),\n default('expression')\n ],\n\n 'goto label': [\n include('whitespace or macro call'),\n (r'\\w+', Name.Label, '#pop'),\n default('#pop')\n ],\n 'goto expression': [\n include('whitespace or macro call'),\n (r',', Punctuation, '#pop'),\n include('partial expression')\n ],\n\n 'single-line string': [\n include('macro call'),\n (r'\"', String, '#pop'),\n # From https://github.com/csound/csound/blob/develop/Opcodes/fout.c#L1405\n (r'%\\d*(\\.\\d+)?[cdhilouxX]', String.Interpol),\n (r'%[!%nNrRtT]|[~^]|\\\\([\\\\aAbBnNrRtT\"]|[0-7]{1,3})', String.Escape),\n (r'[^\\\\\"~$%\\^\\n]+', String),\n (r'[\\\\\"~$%\\^\\n]', String)\n ],\n 'multi-line string': [\n (r'\\}\\}', String, '#pop'),\n (r'[^}]+|\\}(?!\\})', String)\n ],\n\n 'scoreline opcode': [\n include('whitespace or macro call'),\n (r'\\{\\{', String, 'scoreline'),\n default('#pop')\n ],\n 'scoreline': [\n (r'\\}\\}', String, '#pop'),\n (r'([^}]+)|\\}(?!\\})', using(CsoundScoreLexer))\n ],\n\n 'python opcode': [\n include('whitespace or macro call'),\n (r'\\{\\{', String, 'python'),\n default('#pop')\n ],\n 'python': [\n (r'\\}\\}', String, '#pop'),\n (r'([^}]+)|\\}(?!\\})', using(PythonLexer))\n ],\n\n 'lua opcode': [\n include('whitespace or macro call'),\n (r'\"', String, 'single-line string'),\n (r'\\{\\{', String, 'lua'),\n (r',', Punctuation),\n default('#pop')\n ],\n 'lua': [\n (r'\\}\\}', String, '#pop'),\n (r'([^}]+)|\\}(?!\\})', using(LuaLexer))\n ]\n }\n\n\nclass CsoundDocumentLexer(RegexLexer):\n \"\"\"\n For `Csound `_ documents.\n\n .. versionadded:: 2.1\n \"\"\"\n\n name = 'Csound Document'\n aliases = ['csound-document', 'csound-csd']\n filenames = ['*.csd']\n\n # These tokens are based on those in XmlLexer in pygments/lexers/html.py. Making\n # CsoundDocumentLexer a subclass of XmlLexer rather than RegexLexer may seem like a\n # better idea, since Csound Document files look like XML files. However, Csound\n # Documents can contain Csound comments (preceded by //, for example) before and\n # after the root element, unescaped bitwise AND & and less than < operators, etc. 
In\n # other words, while Csound Document files look like XML files, they may not actually\n # be XML files.\n tokens = {\n 'root': [\n newline,\n (r'/[*](.|\\n)*?[*]/', Comment.Multiline),\n (r'[^<&;/]+', Text),\n (r'<\\s*CsInstruments', Name.Tag, ('orchestra', 'tag')),\n (r'<\\s*CsScore', Name.Tag, ('score', 'tag')),\n (r'<\\s*[hH][tT][mM][lL]', Name.Tag, ('HTML', 'tag')),\n (r'<\\s*[\\w:.-]+', Name.Tag, 'tag'),\n (r'<\\s*/\\s*[\\w:.-]+\\s*>', Name.Tag)\n ],\n 'orchestra': [\n (r'<\\s*/\\s*CsInstruments\\s*>', Name.Tag, '#pop'),\n (r'(.|\\n)+?(?=<\\s*/\\s*CsInstruments\\s*>)', using(CsoundOrchestraLexer))\n ],\n 'score': [\n (r'<\\s*/\\s*CsScore\\s*>', Name.Tag, '#pop'),\n (r'(.|\\n)+?(?=<\\s*/\\s*CsScore\\s*>)', using(CsoundScoreLexer))\n ],\n 'HTML': [\n (r'<\\s*/\\s*[hH][tT][mM][lL]\\s*>', Name.Tag, '#pop'),\n (r'(.|\\n)+?(?=<\\s*/\\s*[hH][tT][mM][lL]\\s*>)', using(HtmlLexer))\n ],\n 'tag': [\n (r'\\s+', Text),\n (r'[\\w.:-]+\\s*=', Name.Attribute, 'attr'),\n (r'/?\\s*>', Name.Tag, '#pop')\n ],\n 'attr': [\n (r'\\s+', Text),\n (r'\".*?\"', String, '#pop'),\n (r\"'.*?'\", String, '#pop'),\n (r'[^\\s>]+', String, '#pop')\n ]\n }\n","repo_name":"wandb/wandb","sub_path":"wandb/vendor/pygments/lexers/csound.py","file_name":"csound.py","file_ext":"py","file_size_in_byte":12307,"program_lang":"python","lang":"en","doc_type":"code","stars":7479,"dataset":"github-code","pt":"37"} +{"seq_id":"71046074988","text":"\n\"\"\"\nThis scripts is to remove redundancy in a collections of sequences.\ninput:\n all sequences ids in the collections\n self blast output in format 6 (blast agains it self)\noutput:\n a tsv file, which group each sequence into a cluster\n\"\"\"\n\nimport re\nimport argparse\nimport operator\nimport time\nimport pickle\nimport math\nimport networkx\n\n\nclass Graph:\n \"\"\"\n it's the base class of Graph object, containing two class as component, Vertex and Edge\n The structure of graph is based on Adjacency Map Structure.\n it also contains some fundamental methods\n \"\"\"\n class Vertex:\n \"\"\"\n _element: the name of vertex\n _visited: the status of vertex, not used in this case\n \"\"\"\n __slots__ = '_element', '_visited'\n\n def __init__(self, x):\n self._element = x\n self._visited = False\n\n def element(self):\n return self._element\n\n def is_visit(self):\n return self._visited\n\n def get_visited(self):\n self._visited = True\n\n def __hash__(self):\n return hash(id(self))\n\n class Edge:\n __slots__ = '_origin', '_destination'\n\n def __init__(self, u, v):\n \"\"\"\n :param u: the origin vertex of edge\n :param v: the end vertex of edge\n \"\"\"\n self._origin = u\n self._destination = v\n\n def endpoints(self):\n return self._origin, self._destination\n\n def opposite(self, v):\n return self._destination if v is self._origin else self._origin\n\n def __hash__(self):\n return hash((self._origin, self._destination))\n\n def __init__(self, directed=False):\n \"\"\"\n _outgoing is a dictionary of dictionary. 
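Concretely (a sketch using only this class's own methods; the vertex names 'a' and 'b' are arbitrary illustration values):

    g = Graph(directed=True)
    g.insert_vertex('a')
    g.insert_vertex('b')
    g.insert_edge('a', 'b')   # base-class edge, no extra attributes

after which _outgoing == {Va: {Vb: e}, Vb: {}} and _incoming == {Va: {}, Vb: {Va: e}}, where Va and Vb are the Vertex objects recorded in _vertex_dict; g.get_edge('a', 'b') is then a two-dict lookup and g.degree('a') == 1. The per-map description continues below.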
the value inside is a certain edge.\n the outer key is the origin vertex and the inside key is the destination of edge\n _incoming is just opposite to _outgoing.\n _vertex_dict will give you the vertex object when you have the name of vertex\n :param directed: if it's a directed graph\n \"\"\"\n self._outgoing = {}\n self._incoming = {} if directed else self._outgoing\n self._vertex_dict = {}\n\n def check_reverse_edge(self, e):\n try:\n self._outgoing[e.endpoints()[1]][e.endpoints()[0]]\n return True\n except KeyError:\n return False\n\n def is_directed(self):\n return self._incoming is not self._outgoing\n\n def vertices(self):\n return self._outgoing.keys()\n\n def edge_count(self):\n total = sum(len(self._outgoing[v]) for v in self._outgoing)\n return total if self.is_directed() else total // 2\n\n def edges(self):\n result = set()\n for secondaty_map in self._outgoing.values():\n result.update(secondaty_map.values())\n return result\n\n def get_edge(self, u, v):\n u_vertex = self._vertex_dict[u]\n v_vertex = self._vertex_dict[v]\n return self._outgoing[u_vertex].get(v_vertex)\n\n def degree(self, v, outgoing=True):\n v_vertex = self._vertex_dict[v]\n adj = self._outgoing if outgoing else self._incoming\n return len(adj[v_vertex])\n\n def incident_edges(self, v, outgoing=True):\n if isinstance(v, self.Vertex):\n v_vertex = v\n else:\n v_vertex = self._vertex_dict[v]\n adj = self._outgoing if outgoing else self._incoming\n for edge in adj[v_vertex].values():\n yield edge\n\n def incident_vertex(self, v, outgoing=True):\n return self._outgoing[v].keys() if outgoing else self._incoming[v].keys()\n\n def insert_vertex(self, x=None):\n v = self.Vertex(x)\n self._vertex_dict[x] = v\n self._outgoing[v] = {}\n if self.is_directed():\n self._incoming[v] = {}\n return v\n\n def insert_edge(self, u_element, v_element, **kwargs):\n u_vertex = self._vertex_dict[u_element]\n v_vertex = self._vertex_dict[v_element]\n e = self.Edge(u_vertex, v_vertex)\n self._outgoing[u_vertex][v_vertex] = e\n self._incoming[v_vertex][u_vertex] = e\n\n\nclass BlastGraph(Graph):\n \"\"\"\n it's the graph designed for our project to store the result of blast.\n it contains a BlastEdge class, which is customized version of base Edge class. 
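The score this edge carries is described next; for a feel of the numbers (input values invented for illustration, formulas taken from BlastEdge.__init__ below, with pident and qcovs given as percentages):

    import math
    98.0 * 95.0 / 10000                    # identity/coverage mode -> 0.931
    100.0 * 100.0 / 10000                  # a perfect self-hit     -> 1.0
    1 - (math.log10(1e-180) + 180) / 181   # e-value mode           -> 1.0
    1 - (math.log10(1e-50) + 180) / 181    # e-value mode           -> ~0.282
    1 - (math.log10(1.0) + 180) / 181      # e-value mode           -> ~0.0055

(an e-value of 0 is first clamped to 1e-180). Either way scores fall roughly in (0, 1] with stronger hits closer to 1, which is what allows path strengths to be combined by multiplication in the clustering step.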
the slots '_pident' and '_qcovs' are\n from blast stats and '_score' is defined as the multiply of '_pident' and '_qcovs'\n this class also has three methods for constructing the graph from a particular format of blast result.\n \"\"\"\n class BlastEdge(Graph.Edge):\n __slots__ = '_origin', '_destination', '_pident', '_qcovs', '_evalue', '_score'\n\n def __init__(self, u, v, pident, qcovs, evalue=None):\n self._pident = pident\n self._qcovs = qcovs\n self._evalue = evalue\n if evalue:\n if float(evalue) == 0:\n evalue = \"1e-180\"\n self._score = 1 - (math.log10(float(evalue)) + 180) / (1 + 180)\n # 1 / (1 + math.exp(0.1 * (math.log10(float(evalue)) + 30)))\n else:\n self._score = float(pident) * float(qcovs) / 10000\n\n super().__init__(u, v)\n\n def get_pident(self):\n return self._pident\n\n def get_qcovs(self):\n return self._qcovs\n\n def get_score(self):\n return self._score\n\n def insert_edge(self, u, v, **kwargs):\n cutoff = min(kwargs[\"thresholds\"])\n del kwargs[\"thresholds\"]\n u_vertex = self._vertex_dict[u]\n v_vertex = self._vertex_dict[v]\n e = self.BlastEdge(u_vertex, v_vertex, **kwargs)\n if e.get_score() < cutoff:\n return 0\n else:\n self._outgoing[u_vertex][v_vertex] = e\n self._incoming[v_vertex][u_vertex] = e\n return e\n\n def read_vertex(self, file):\n with open(file, \"r\") as vertex_file:\n for cnt, line in enumerate(vertex_file):\n self.insert_vertex(line.rstrip())\n\n def read_edge(self, file, qseqid, sseqid, pident, qcovs, evalue, thresholds):\n with open(file, \"r\") as edge_file:\n for cnt, line in enumerate(edge_file):\n words = re.split(r\"\\s+\", line.rstrip())\n query_id = words[qseqid]\n subject_id = words[sseqid]\n\n if query_id != subject_id:\n # if evalue is specified, then enter evalue mode\n if evalue:\n self.insert_edge(query_id, subject_id,\n pident=words[pident], qcovs=words[qcovs],\n evalue=words[evalue], thresholds=thresholds)\n else:\n self.insert_edge(query_id, subject_id,\n pident=words[pident], qcovs=words[qcovs],\n thresholds=thresholds)\n\n '''\n for u in self._outgoing.keys():\n for v in self._outgoing[u].keys():\n if self.self._outgoing[u][v].get_score() == 1:\n try:\n if self.self._outgoing[v][u].get_score() != 1:\n del self.self._outgoing[v][u]\n except KeyError:\n continue\n '''\n\n\n# Graph Based Sequence Clustering Algorithm\nclass GBSCA(BlastGraph):\n \"\"\"\n it's the main body of our Graph Based Sequence Clustering Algorithm(GBSCA). the class Cluster contain method of\n cluster constructor, checking if a node can be added to that cluster and checking if two clusters can be merged.\n the '_cluster_nodes' slots contain a dictionary of node and their outdegree. the '_path_distance' is the shortest\n path between any two node, if reachable, in the cluster.\n GBSCA class itself also has a method dispatch to guide the analysis into certain method. for the four dispatched\n method, they all consist of four parts: firstly, initial the tested nodes and clusters and do some simple test;\n Secondly, get the in_edges and out_edges between them. Thirdly, check it they can be added/merged. 
Fourth, update\n the cluster and node information in GBSCA.\n \"\"\"\n class Cluster:\n __slots__ = \"_cluster_nodes\", \"_path_distance\", \"_cluster_representative\", \"_verbose\"\n\n def __init__(self, u, v, e, verbose):\n self._cluster_nodes = {u: 1, v: 0} # node as key and outdegree as value\n self._path_distance = {}\n self._cluster_representative = []\n\n self._path_distance[u] = {v: e.get_score()}\n self._path_distance[v] = {u: 0}\n self._verbose = verbose\n\n def set_verbose(self, verbose):\n self._verbose = verbose\n\n def get_cluster_nodes(self):\n return self._cluster_nodes\n\n def get_outdegree(self, u):\n return self._cluster_nodes[u]\n\n def get_path_distance(self, u):\n return self._path_distance[u]\n\n def get_shortest_distance(self, u, v=None, incoming=False):\n if incoming:\n return self._path_distance[v][u]\n else:\n return self._path_distance[u][v]\n\n def initialize_node_insertion(self, u):\n self._cluster_nodes[u] = 0\n self._path_distance[u] = {}\n\n def set_shortest_distance(self, u, v, dist):\n self._path_distance[u][v] = dist\n\n def get_reachable_nodes(self, u, incoming=False):\n \"\"\"\n :param u: certain node in the cluster\n :param incoming: it's for direction. if you want to regard u as the destination of the path,\n make \"incoming\" True.\n :return: all the node that can connect to u in certain direction\n \"\"\"\n if incoming:\n return [v for v in self.get_cluster_nodes().keys() if u in self._path_distance[v].keys()]\n else:\n return self._path_distance[u].keys()\n\n def calculate_one_to_cluster_distance(self, v, bridge_dict, edges_check, incoming=False):\n \"\"\"\n :param v: any vertex in this cluster\n :param bridge_dict: the collection of edges connect the nodes in this cluster and outside. its keys are the\n nodes in this clusters (so there is a possibility that v may appear in bridge_dict too). its values are\n the bridge edges.\n :param edges_check: threshold to check if there is any need to do a score multiply\n :param incoming: get the direction information. if the bridge edges are from outside into the clusters,\n incoming should be true. otherwise, it's false\n :return: the shortest distance from a bridges edge to node v inside the cluster.\n \"\"\"\n if v in bridge_dict.keys():\n dist = bridge_dict[v].get_score()\n else:\n dist = max([bridge_dict[t].get_score() * self.get_shortest_distance(v, t, incoming)\n for t in bridge_dict.keys()\n if self.get_shortest_distance(v, t, incoming) > edges_check] + [0])\n return dist\n\n def check_and_insert_node(self, u, cluster_threshold, in_edges, out_edges, double_edges,\n in_edges_max_score, out_edges_max_score):\n \"\"\"\n temp_in and temp_out will store all the shortest distance between u and any node in the clusters,\n if reachable. if temp_in and temp_out pass the cluster_threshold test, they can also be used to update\n \"_path_distance\" when u is added. 
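A numeric sketch of the pruning thresholds computed at the start of this method's body (figures invented for illustration): every candidate distance is a product bridge_score * internal_distance, and bridge_score is at most in_edges_max_score[0], so with

    cluster_threshold = 0.5
    in_edges_max_score = (0.9,)
    in_edges_check = cluster_threshold / in_edges_max_score[0]   # ~0.556

an internal distance of, say, 0.5 <= 0.556 can contribute at best 0.5 * 0.9 = 0.45 < cluster_threshold, so calculate_one_to_cluster_distance skips it before multiplying; if every term is skipped, the max(... + [0]) fallback returns 0, i.e. the pair is treated as unreachable rather than as a failed check.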
At the same time, outdegree of any node in the clusters is also updated.\n :param u: the node to be checked\n :param cluster_threshold: threshold for the minimum _path_distance in cluster\n :param in_edges: all the edges from u to the clusters\n :param out_edges: all the edges from the clusters to u\n :param in_edges_max_score: threshold to check if there is any need to do a score multiply\n :param out_edges_max_score: threshold to check if there is any need to do a score multiply\n :return: if u is added to the cluster or not\n \"\"\"\n in_edges_check = cluster_threshold / in_edges_max_score[0]\n out_edges_check = cluster_threshold / out_edges_max_score[0]\n\n temp_in_nodes = {e.endpoints()[1]: e for e in in_edges}\n temp_out_nodes = {e.endpoints()[0]: e for e in out_edges}\n temp_in = {}\n temp_out = {}\n\n for v in self.get_cluster_nodes().keys():\n dist_in = self.calculate_one_to_cluster_distance(\n v, temp_in_nodes, in_edges_check, incoming=True\n )\n dist_out = self.calculate_one_to_cluster_distance(\n v, temp_out_nodes, out_edges_check\n )\n if 0 < dist_in < cluster_threshold and 0 < dist_out < cluster_threshold:\n if self._verbose:\n print(\"first cluster is: \" +\n \"; \".join([j.element() for j in self.get_cluster_nodes().keys()]) +\n \"\\nThe failed insertion is from: \" + u.element() + \" to \" + v.element() +\n \"\\nforward distance is \" + str(dist_in) + \"; backward distance is \" + str(dist_out))\n # fail\n return 0\n else:\n temp_in.update({v: dist_in})\n temp_out.update({v: dist_out})\n\n # success\n self.initialize_node_insertion(u)\n\n self._cluster_nodes[u] += len([1 for i in in_edges\n if i not in double_edges])\n for i in out_edges:\n if i not in double_edges:\n self._cluster_nodes[i.endpoints()[0]] += 1\n\n for v, dist in temp_in.items():\n self._path_distance[u][v] = dist\n for v, dist in temp_out.items():\n self._path_distance[v][u] = dist\n return 1\n\n def check_and_merge_cluster(self, clu_v, cluster_threshold, in_edges, out_edges, double_edges,\n in_edges_max_score, out_edges_max_score):\n \"\"\"\n the idea is similar to check_and_insert_node method. 
but expand the one-to-many relation to a many-to-many\n relation.\n the border nodes are the ones who are in both the endpoints of bridges edges and clu_v\n :param clu_v: the other cluster\n :param cluster_threshold: threshold for the minimum _path_distance in cluster\n :param in_edges: all the edges from clu_u to clu_v\n :param out_edges: all the edges from clu_v to clu_u\n :param in_edges_max_score: threshold to check if there is any need to do a score multiply\n :param out_edges_max_score: threshold to check if there is any need to do a score multiply\n :return: if two clusters are merged or not\n \"\"\"\n\n in_edges_check = cluster_threshold / in_edges_max_score[0]\n out_edges_check = cluster_threshold / out_edges_max_score[0]\n\n border_nodes_set = set([e.endpoints()[0] for e in in_edges] +\n [e.endpoints()[1] for e in out_edges])\n\n temp_border_dist = {}\n for i in border_nodes_set:\n temp_in_nodes = {e.endpoints()[1]: e for e in in_edges if e.endpoints()[0] == i}\n temp_out_nodes = {e.endpoints()[0]: e for e in out_edges if e.endpoints()[1] == i}\n temp_in = {}\n temp_out = {}\n\n for v in clu_v.get_cluster_nodes().keys():\n dist_in = clu_v.calculate_one_to_cluster_distance(\n v, temp_in_nodes, in_edges_check, incoming=True\n )\n dist_out = clu_v.calculate_one_to_cluster_distance(\n v, temp_out_nodes, out_edges_check\n )\n if 0 < dist_in < cluster_threshold and 0 < dist_out < cluster_threshold:\n # fail\n if self._verbose:\n print(\"The failed insertion is from: \" + i.element() + \" to \" + v.element())\n return 0\n else:\n temp_in.update({(i, v): dist_in})\n temp_out.update({(v, i): dist_out})\n\n temp_border_dist.update({i: [temp_in, temp_out]})\n\n temp_distance = {\"in\": {}, \"out\": {}}\n for i in self.get_cluster_nodes().keys():\n if i in border_nodes_set:\n temp_distance[\"in\"].update({\n pair_key: dist for pair_key, dist in temp_border_dist[i][0].items()\n })\n temp_distance[\"out\"].update({\n pair_key: dist for pair_key, dist in temp_border_dist[i][1].items()\n })\n else:\n for v in clu_v.get_cluster_nodes().keys():\n # only if the edge with score more than edge_check will get multiplication,\n # which is the most time-consuming step.\n dist_in = max([self.get_shortest_distance(i, border_node) *\n temp_border_dist[border_node][0][(border_node, v)]\n for border_node in border_nodes_set\n if self.get_shortest_distance(i, border_node) > in_edges_check] + [0])\n dist_out = max([self.get_shortest_distance(border_node, i) *\n temp_border_dist[border_node][1][(v, border_node)]\n for border_node in border_nodes_set\n if self.get_shortest_distance(border_node, i) > out_edges_check] + [0])\n if 0 < dist_in < cluster_threshold and 0 < dist_out < cluster_threshold:\n if self._verbose:\n print(\"The failed insertion is from: \" + i.element() + \" to \" + v.element())\n # fail\n return 0\n else:\n temp_distance[\"in\"].update({(i, v): dist_in})\n temp_distance[\"out\"].update({(v, i): dist_out})\n\n # success\n for i in clu_v.get_cluster_nodes().keys():\n self._cluster_nodes[i] = clu_v.get_outdegree(i)\n self._path_distance[i] = clu_v.get_path_distance(i)\n\n for pair_key in temp_distance[\"in\"]:\n self._path_distance[pair_key[0]][pair_key[1]] = temp_distance[\"in\"][pair_key]\n\n for pair_key in temp_distance[\"out\"]:\n self._path_distance[pair_key[0]][pair_key[1]] = temp_distance[\"out\"][pair_key]\n\n for i in out_edges:\n if i not in double_edges:\n self._cluster_nodes[i.endpoints()[0]] += 1\n for i in in_edges:\n if i not in double_edges:\n 
self._cluster_nodes[i.endpoints()[0]] += 1\n return 1\n\n def show_cluster_component(self, out_file=None):\n \"\"\"\n This method will show you the nodes, edges information in a cluster\n :param out_file: if you want to push the output to a file, specify the file name here\n :return: None\n \"\"\"\n nodes = self.get_cluster_nodes().keys()\n edges = []\n for i in nodes:\n edges.append(\"\\n\".join([i.element() + \"\\t\" + j.element() + \"\\t\" + str(self.get_shortest_distance(i, j))\n for j in self.get_reachable_nodes(i)]))\n\n msg = \"nodes are\\n\" + \"\\n\".join(map(operator.methodcaller('element'), nodes)) + \\\n \"\\nedges are\\n\" + \"\\n\".join(edges)\n\n if out_file:\n out_fh = open(out_file, \"w+\")\n out_fh.write(msg)\n else:\n print(msg)\n\n\n\n def __init__(self, verbose=False, directed=True, out_degree=3):\n \"\"\"\n constructor of GBSCA\n :param verbose: if you need the intermediate result\n :param directed: if it's a directed graph\n \"\"\"\n super().__init__(directed)\n self._VisitedVertex = {}\n self._NotVisitedEdges = set()\n self._cluster_collection = set()\n self._verbose = verbose\n self._cluster_count = 0\n # out degree checking\n self._start_to_checking_outdegree = out_degree\n\n def set_verbose(self, verbose):\n self._verbose = verbose\n\n def get_visited_vertex(self):\n # also include their clusters\n return self._VisitedVertex.keys()\n\n def get_cluster(self, u_element=None):\n if u_element:\n u_vertex = self._vertex_dict[u_element]\n return self._VisitedVertex[u_vertex]\n else:\n return self._cluster_collection\n\n def get_number_of_clusters(self):\n return self._cluster_count\n\n def visiting_vertex(self, u, clu):\n self._VisitedVertex[u] = clu\n if u not in clu.get_cluster_nodes().keys():\n raise ValueError(\"how come\")\n\n def visiting_edge(self, e):\n if isinstance(e, list):\n for i in e:\n if i in self._NotVisitedEdges:\n self._NotVisitedEdges.remove(i)\n else:\n if e in self._NotVisitedEdges:\n self._NotVisitedEdges.remove(e)\n\n def insert_edge(self, u, v, **kwargs):\n e = super().insert_edge(u, v, **kwargs)\n if e and e.get_score() > max(kwargs[\"thresholds\"]):\n self._NotVisitedEdges.add(e)\n\n def remove_low_quality_edges(self, thresholds):\n cutoff = min(thresholds)\n deleted_edges = set()\n for i in self._NotVisitedEdges:\n if i.get_score() <= cutoff:\n deleted_edges.add(i)\n del self._outgoing[i.endpoints()[0]][i.endpoints()[1]]\n del self._incoming[i.endpoints()[1]][i.endpoints()[0]]\n self._NotVisitedEdges -= deleted_edges\n\n def get_bridge_edges(self, u, clu_v):\n if isinstance(u, self.Vertex):\n return self.get_in_and_out_edges(u, clu_v)\n\n if isinstance(u, self.Cluster):\n edge_info = {\"in_edges\": [], \"out_edges\": [], \"double_edges\": [],\n \"in_edges_max_score\": [], \"out_edges_max_score\": []}\n for node in u.get_cluster_nodes().keys():\n res = self.get_in_and_out_edges(node, clu_v)\n for i in edge_info.keys():\n edge_info[i].extend(res[i])\n return edge_info\n\n def get_in_and_out_edges(self, u, clu_v):\n in_edges = []\n in_edges_max_score = 0\n out_edges = []\n out_edges_max_score = 0\n for x, f in self._outgoing[u].items():\n if x in clu_v.get_cluster_nodes().keys():\n in_edges.append(f)\n in_edges_max_score = max(in_edges_max_score, f.get_score())\n for x, f in self._incoming[u].items():\n if x in clu_v.get_cluster_nodes().keys():\n out_edges.append(f)\n out_edges_max_score = max(out_edges_max_score, f.get_score())\n\n double_edges = []\n for i in in_edges:\n if self.check_reverse_edge(i):\n double_edges.append(i)\n 
double_edges.append(self._outgoing[i.endpoints()[1]][i.endpoints()[0]])\n return {\"in_edges\": in_edges, \"out_edges\": out_edges, \"double_edges\": double_edges,\n \"in_edges_max_score\": [in_edges_max_score if in_edges_max_score != 0 else 1],\n \"out_edges_max_score\": [out_edges_max_score if out_edges_max_score != 0 else 1]}\n\n def check_graph_integrity(self, clu):\n key_list = [j for j in self._VisitedVertex.keys() if self._VisitedVertex[j] == clu]\n for i in key_list:\n if i not in clu.get_cluster_nodes().keys():\n raise ValueError(\"there is a key missing\")\n\n for i in clu.get_cluster_nodes().keys():\n if i not in key_list:\n raise ValueError(\"there is a key missing\")\n\n def get_candidate_edges(self, edge_threshold):\n print(\"the size of the not-visited edge set is \" + str(len(self._NotVisitedEdges)))\n candidate_edges = [e for e in self._NotVisitedEdges if e.get_score() > edge_threshold]\n\n return candidate_edges\n\n def determine_cluster(self, e, cluster_threshold):\n u, v = e.endpoints()\n self.visiting_edge(e)\n method = self.dispatch_determine_method(u, v)\n # look the handler up by name instead of building a string for eval\n getattr(self, method)(u, v, e, cluster_threshold)\n\n def dispatch_determine_method(self, u, v):\n cluster_type = [u not in self.get_visited_vertex(), v not in self.get_visited_vertex()]\n if self._verbose:\n msg = [re.sub(r\"False\", \"a cluster\", re.sub(r\"True\", \"a node\", str(i)))\n for i in cluster_type]\n print(\"\\t\".join(msg))\n if cluster_type == [True, True]:\n return \"node_to_node\"\n\n if cluster_type == [True, False]:\n return \"node_to_cluster\"\n\n if cluster_type == [False, True]:\n return \"cluster_to_node\"\n\n if cluster_type == [False, False]:\n return \"cluster_to_cluster\"\n\n def node_to_node(self, u, v, e, cluster_threshold):\n if e.get_score() > cluster_threshold:\n clu = self.Cluster(u, v, e, verbose=self._verbose)\n self._cluster_collection.add(clu)\n self._cluster_count += 1\n\n self.visiting_vertex(u, clu)\n self.visiting_vertex(v, clu)\n if self._verbose:\n print(\"successfully initialize a cluster with two seed nodes: \" + u.element() + \" and \" + v.element())\n\n def node_to_cluster(self, u, v, e, cluster_threshold):\n clu = self._VisitedVertex[v]\n edge_information = self.get_bridge_edges(u, clu)\n\n if clu.check_and_insert_node(u, cluster_threshold, **edge_information):\n self.visiting_vertex(u, clu)\n self.visiting_edge(edge_information[\"in_edges\"])\n self.visiting_edge(edge_information[\"out_edges\"])\n if self._verbose:\n print(\"successfully insert the node \" + u.element() + \" into cluster\") # fixed: u is the inserted node, not v\n else:\n if self._verbose:\n print(\"Unsuccessfully insert the node \" + u.element() + \" into cluster\")\n\n def cluster_to_node(self, u, v, e, cluster_threshold):\n clu = self._VisitedVertex[u]\n\n # when the cluster has more nodes than a check point,\n # then only if the origin of the bridge edge, which is in the clu cluster,\n # has an outdegree equal to 0, we will process it further\n # the aim of the outdegree check is to avoid the situation where two distinct transcripts\n # are falsely grouped together because there is one exon shared between them\n # if clu.get_outdegree(u) != 0 and \\\n # len(clu.get_cluster_nodes().keys()) > self._start_to_checking_outdegree:\n # return 0\n\n edge_information = self.get_bridge_edges(v, clu)\n\n if clu.check_and_insert_node(v, cluster_threshold, **edge_information):\n self.visiting_vertex(v, clu)\n self.visiting_edge(edge_information[\"in_edges\"])\n self.visiting_edge(edge_information[\"out_edges\"])\n if self._verbose:\n print(\"successfully 
insert the node \" + v.element() + \" into cluster\")\n else:\n if self._verbose:\n print(\"Unsuccessfully insert the node \" + v.element() + \" into cluster\")\n\n def cluster_to_cluster(self, u, v, e, cluster_threshold):\n clu_u = self._VisitedVertex[u]\n clu_v = self._VisitedVertex[v]\n\n if clu_u == clu_v:\n return 0\n\n # when the cluster has more nodes than a check point,\n # then only if the the origin of bridge edge, which is in clu_v cluster,\n # has a outdegree equal to 0, we will consider it further\n # if clu_v.get_outdegree(v) != 0 and \\\n # len(clu_v.get_cluster_nodes().keys()) > self._start_to_checking_outdegree:\n # return 0\n\n edge_information = self.get_bridge_edges(clu_u, clu_v)\n\n if clu_u.check_and_merge_cluster(clu_v, cluster_threshold, **edge_information):\n self.visiting_edge(edge_information[\"in_edges\"])\n self.visiting_edge(edge_information[\"out_edges\"])\n for i in clu_v.get_cluster_nodes().keys():\n self._VisitedVertex[i] = clu_u\n self._cluster_collection.remove(clu_v)\n self._cluster_count -= 1\n\n if self._verbose:\n print(\"successfully merge two clusters\")\n else:\n if self._verbose:\n print(\"Unsuccessfully merge two clusters\")\n\n def train_part(self, initial_edge_threshold, final_edge_threshold, cluster_threshold):\n edge_threshold = initial_edge_threshold\n lower_amp = (initial_edge_threshold - final_edge_threshold) / 100\n while edge_threshold > final_edge_threshold:\n time_tmp = time.time()\n candidate_edges = list(self.get_candidate_edges(edge_threshold))\n # sort the candidate edges by their scores\n candidate_edges.sort(key=operator.methodcaller('get_score'), reverse=True)\n print(\"first part takes \" + str(time.time() - time_tmp))\n time_tmp = time.time()\n while candidate_edges:\n # process the edges one by one\n test_edge = candidate_edges.pop(0)\n self.determine_cluster(test_edge, cluster_threshold)\n\n print(\"second part takes \" + str(time.time() - time_tmp))\n edge_threshold = edge_threshold - lower_amp\n print(self.get_number_of_clusters())\n print(\"edge_threshold = \" + str(edge_threshold))\n print()\n\n def print_cluster_report(self, out_base=None):\n cnt = 0\n clu_nodes = []\n output_cluster = open(out_base + \"_clusters.txt\", \"w\")\n for clu in self._cluster_collection:\n cnt += 1\n clu_nodes.append(len(clu.get_cluster_nodes().keys()))\n\n output_cluster.write(\"\\n\".join([str(i) + \"\\tCluster_\" + str(cnt)\n for i in\n map(operator.methodcaller('element'), clu.get_cluster_nodes().keys())])\n + \"\\n\")\n\n print(\"there are \" + str(cnt) + \" clusters.\\nTheir number of nodes are \"\n + \", \".join([str(i) for i in clu_nodes]) +\n \"\\n\" + str(sum(clu_nodes)) + \" nodes are involved in the clusters\")\n\n def check_with_known_isoforms(self):\n luci_gene_id = \"TRINITY_DN8457_c0_g1\"\n ribo_gene_id = \"TRINITY_DN3636_c0_g1\"\n check_luci = set()\n check_ribo = set()\n for clu in self._cluster_collection:\n for i in map(operator.methodcaller('element'), clu.get_cluster_nodes().keys()):\n if re.match(luci_gene_id, i):\n check_luci.add(clu)\n if re.match(ribo_gene_id, i):\n check_ribo.add(clu)\n\n print(\"the luciferases are in \" + str(len(check_luci)) + \" different clusters\")\n print(\"the ribosomals are in \" + str(len(check_ribo)) + \" different clusters\")\n return check_luci, check_ribo\n\n def plot_a_cluster(self, clu):\n nodes_set = clu.get_cluster_nodes().keys()\n G = networkx.DiGraph()\n G.add_nodes_from(nodes_set)\n\n edges_set = []\n for i in nodes_set:\n edges_set.extend([e.endpoints() for e in 
self.incident_edges(i) if e.endpoints()[1] in nodes_set])\n G.add_edges_from(edges_set)\n networkx.draw_circular(G, labels={i: re.sub(r\"TRINITY_DN\", \"\", i.element()) for i in nodes_set})\n\n def plot_adjacent(self, node):\n node_obj = self._vertex_dict[node]\n\n G = networkx.DiGraph()\n incident_node_set = set()\n for i in self.incident_vertex(node_obj, outgoing=True):\n incident_node_set.add(i)\n\n for i in self.incident_vertex(node_obj, outgoing=False):\n incident_node_set.add(i)\n\n G.add_nodes_from(incident_node_set)\n\n edges_col = {}\n for e in self._outgoing[node_obj].values():\n G.add_edge(e.endpoints()[0], e.endpoints()[1], weights=round(e.get_score(), 3), color='r')\n edges_col[e] = \"r\"\n\n for e in self._incoming[node_obj].values():\n G.add_edge(e.endpoints()[0], e.endpoints()[1], weights=round(e.get_score(), 3), color='b')\n edges_col[e] = \"b\"\n\n pos = networkx.circular_layout(G) # positions for all nodes\n\n # nodes\n networkx.draw_networkx_nodes(G, pos, node_size=200)\n\n # edges\n networkx.draw_networkx_edges(G, pos, width=2)\n networkx.draw_networkx_edge_labels(G, pos, edge_labels=networkx.get_edge_attributes(G, 'weights'))\n\n # labels\n networkx.draw_networkx_labels(G, pos, font_size=10, font_family='sans-serif',\n labels={i: re.sub(r\"TRINITY_DN\", \"\", i.element()) for i in incident_node_set})\n\n\ndef parse_blast_outfmt(outfmt):\n \"\"\"\n :param outfmt: the output format specified during blast\n :return: the location of column qseqid, sseqid, pident, qcovs\n \"\"\"\n column = re.split(r\"\\s+\", outfmt)\n format_number = [i for i in column if re.match(r\"^\\d+$\", i)]\n if format_number == [\"6\"] or format_number == []:\n init = 1 if format_number == [\"6\"] else 0\n try:\n qseqid = column.index('qseqid') - init\n sseqid = column.index('sseqid') - init\n pident = column.index('pident') - init\n qcovs = column.index('qcovs') - init\n evalue = column.index('evalue') - init\n return [qseqid, sseqid, pident, qcovs, evalue]\n except ValueError as err:\n print('please specify valid blast column id. 
the error message is: ', err)\n else:\n raise ValueError(\"outfmt should be format 6 with appropriate column names\")\n\n\ndef main(blast_id, blast_dir, out_base, outfmt, use_evalue,\n cluster_threshold, initial_edge_threshold, final_edge_threshold):\n\n start_time = time.time()\n qseqid, sseqid, pident, qcovs, evalue = parse_blast_outfmt(outfmt)\n if not use_evalue:\n evalue = None\n\n # start to construct the graph\n my_graph = GBSCA()\n my_graph.read_vertex(blast_id)\n my_graph.read_edge(blast_dir, qseqid, sseqid, pident, qcovs, evalue, [cluster_threshold, final_edge_threshold])\n print(\"Graph initialization is finished.\")\n\n # begin with the most stringent threshold and gradually lower it until it hits the final threshold\n my_graph.train_part(initial_edge_threshold, final_edge_threshold, cluster_threshold)\n\n my_graph.print_cluster_report(out_base)\n my_graph.check_with_known_isoforms()\n print(\"Overall time is \" + str(time.time() - start_time))\n\n with open(out_base + \"_GBSCA.obj\", 'wb') as clusters_file:\n pickle.dump(my_graph, clusters_file)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--blastID', required=True,\n help='The input file where your blast IDs are stored')\n parser.add_argument('--blastDir', required=True,\n help='The input file where your blast results are stored')\n parser.add_argument('--outBase', required=True,\n help='The base name for the output files')\n parser.add_argument('--outfmt',\n default='6 qseqid sseqid bitscore qcovs evalue pident qstart qend sstart send length qlen',\n help='blast output format')\n parser.add_argument('--use_evalue', action=\"store_true\",\n help='score edges by e-value instead of pident * qcovs')\n parser.add_argument('--cluster_threshold', default=0.6, type=float,\n help='used to determine whether a node can be added to a cluster or two clusters can be merged')\n parser.add_argument('--initial_edge_threshold', default=0.98, type=float,\n help='initial (most stringent) threshold to filter candidate edges that serve as seeds')\n parser.add_argument('--final_edge_threshold', default=0.8, type=float,\n help='final (loosest) threshold for candidate edges')\n parser.add_argument('--verbose',\n action=\"store_true\",\n help='if we need to print out some intermediate results')\n\n argument = parser.parse_args()\n main(argument.blastID, argument.blastDir, argument.outBase, argument.outfmt, argument.use_evalue,\n argument.cluster_threshold, argument.initial_edge_threshold, argument.final_edge_threshold)\n\n\n\n\n\n\n","repo_name":"chenpoi/CodeExample","sub_path":"PythonExample/my_graph_algorithm_with_OOP.py","file_name":"my_graph_algorithm_with_OOP.py","file_ext":"py","file_size_in_byte":37203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21868665234","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@author: sinannasir\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom itertools import cycle\r\nimport json\r\nimport matplotlib\r\nmatplotlib.use('Qt5Agg')\r\nimport argparse\r\n\r\ndef main(scenario): \r\n json_file = scenario['json_file']\r\n json_file_policy = scenario['json_file_policy']\r\n num_sim = scenario['num_sim']\r\n with open('./config/deployment/'+json_file+'.json','r') as f:\r\n options = json.load(f)\r\n \r\n ## Number of samples\r\n total_samples = options['simulation']['total_samples']\r\n \r\n K = options['simulation']['K']\r\n N = options['simulation']['N']\r\n \r\n \r\n if num_sim == -1:\r\n num_simulations = options['simulation']['num_simulations']\r\n 
simulation = options['simulation']['simulation_index_start']\r\n else:\r\n num_simulations = 1\r\n simulation = num_sim\r\n \r\n # simulation parameters\r\n mobility_params = options['mobility_params']\r\n mobility_params['alpha_angle'] = options['mobility_params']['alpha_angle_rad'] * np.pi #radian/sec\r\n \r\n history = 250\r\n \r\n \r\n mean_p_FP = np.zeros(total_samples)\r\n mean_time_FP = np.zeros(total_samples)\r\n mean_iterations_FP = np.zeros(total_samples)\r\n mean_sum_rate_FP = np.zeros(total_samples)\r\n mean_p_WMMSE = np.zeros(total_samples)\r\n mean_time_WMMSE = np.zeros(total_samples)\r\n mean_iterations_WMMSE = np.zeros(total_samples)\r\n mean_sum_rate_WMMSE = np.zeros(total_samples)\r\n \r\n mean_sum_rate_delayed_central = np.zeros(total_samples)\r\n mean_sum_rate_random = np.zeros(total_samples)\r\n mean_sum_rate_max = np.zeros(total_samples)\r\n \r\n mean_sum_rate_policy_train_innersims = np.zeros(total_samples)\r\n mean_p_strategy_all_train_innersims = np.zeros(total_samples)\r\n \r\n mean_time_optimization_at_each_slot_takes = []\r\n mean_time_calculating_strategy_takes = []\r\n \r\n for overal_sims in range(simulation,simulation+num_simulations):\r\n # Get the benchmarks.\r\n file_path = './simulations/sumrate/benchmarks/%s_network%d'%(json_file,overal_sims)\r\n data = np.load(file_path+'.npz')\r\n p_FP = data['arr_0']\r\n time_stats_FP = data['arr_1']\r\n sum_rate_FP = data['arr_2']\r\n p_WMMSE = data['arr_3']\r\n time_stats_WMMSE= data['arr_4']\r\n sum_rate_WMMSE = data['arr_5']\r\n sum_rate_delayed_central = data['arr_6']\r\n sum_rate_random = data['arr_7']\r\n sum_rate_max = data['arr_8']\r\n \r\n file_path = './simulations/sumrate/train/%s_%s_network%d.ckpt'%(json_file,json_file_policy,overal_sims)\r\n data = np.load(file_path+'.npz')\r\n # Get the train policy results\r\n sum_rate_policy_train = data['arr_2']\r\n p_strategy_all = data['arr_3']\r\n time_optimization_at_each_slot_takes = data['arr_4']\r\n time_calculating_strategy_takes = data['arr_5']\r\n \r\n # Average\r\n mean_p_FP = mean_p_FP + np.sum(p_FP,1)/float(num_simulations)\r\n mean_time_FP = mean_time_FP + time_stats_FP[:,0]/float(num_simulations)\r\n mean_iterations_FP = mean_iterations_FP + time_stats_FP[:,1]/float(num_simulations)\r\n mean_sum_rate_FP = mean_sum_rate_FP + sum_rate_FP/float(num_simulations)\r\n mean_p_WMMSE = mean_p_WMMSE + np.sum(p_WMMSE,1)/float(num_simulations)\r\n mean_time_WMMSE = mean_time_WMMSE + time_stats_WMMSE[:,0]/float(num_simulations)\r\n mean_iterations_WMMSE = mean_iterations_WMMSE + time_stats_WMMSE[:,1]/float(num_simulations)\r\n mean_sum_rate_WMMSE = mean_sum_rate_WMMSE + sum_rate_WMMSE/float(num_simulations)\r\n \r\n mean_sum_rate_delayed_central = mean_sum_rate_delayed_central + sum_rate_delayed_central/float(num_simulations)\r\n mean_sum_rate_random = mean_sum_rate_random + sum_rate_random/float(num_simulations)\r\n mean_sum_rate_max = mean_sum_rate_max + sum_rate_max/float(num_simulations)\r\n \r\n mean_sum_rate_policy_train_innersims = mean_sum_rate_policy_train_innersims + sum_rate_policy_train/float(num_simulations)\r\n mean_p_strategy_all_train_innersims = mean_p_strategy_all_train_innersims + np.sum(p_strategy_all,1)/float(num_simulations)\r\n \r\n mean_time_optimization_at_each_slot_takes.append(time_optimization_at_each_slot_takes)\r\n mean_time_calculating_strategy_takes.append(time_calculating_strategy_takes)\r\n \r\n #print('K '+ str(int(N))+' R '+str(R_defined)+ ' r '+str(min_dist) + ' '+file_path[14:18])\r\n #print('Test Sum rate wmmse ' + 
str(np.mean(mean_sum_rate_WMMSE[total_samples-2500:]/N)))\r\n #print('Test Sum rate optimal ' + str(np.mean(mean_sum_rate[total_samples-2500:]/N)))\r\n #print('Test Sum rate delayed ' + str(np.mean(mean_sum_rate_delayed_central[total_samples-2500:]/N)))\r\n #print('Test Sum rate random ' + str(np.mean(mean_sum_rate_random[total_samples-2500:]/N)))\r\n #print('Test Sum rate max ' + str(np.mean(mean_sum_rate_max[total_samples-2500:]/N)))\r\n #for i in range(len(power_multiplier_allsims)):\r\n # print('Multiplier '+str(power_multiplier_allsims[i])+\r\n # ' Test Sum rate ' +str(np.mean(mean_sum_rate_policy_train_innersims[i,total_samples-2500:]/N)))\r\n \r\n lines = [\"-\",\"--\",':','-.',':','-.']\r\n linecycler = cycle(lines)\r\n history = 100\r\n fig = plt.figure()\r\n \r\n t=np.arange(0,total_samples,10)\r\n \r\n sum_rate_performance_FP = []\r\n sum_rate_performance_random = []\r\n sum_rate_performance_max = []\r\n sum_rate_performance_delayed_central = []\r\n sum_rate_performance_policy = []\r\n sum_rate_performance_wmmse = []\r\n sum_rate_performance_policy = []\r\n \r\n ep_start = 0\r\n for i in range(len(t)):\r\n if t[i] % options['train_episodes']['T_train'] == 0:\r\n ep_start = t[i]\r\n sum_rate_performance_FP.append(np.mean(mean_sum_rate_FP[max(ep_start,t[i]-history):t[i]]))\r\n sum_rate_performance_random.append(np.mean(mean_sum_rate_random[max(ep_start,t[i]-history):t[i]]))\r\n sum_rate_performance_max.append(np.mean(mean_sum_rate_max[max(ep_start,t[i]-history):t[i]]))\r\n sum_rate_performance_delayed_central.append(np.mean(mean_sum_rate_delayed_central[max(ep_start,t[i]-history):t[i]]))\r\n sum_rate_performance_wmmse.append(np.mean(mean_sum_rate_WMMSE[max(ep_start,t[i]-history):t[i]]))\r\n sum_rate_performance_policy.append(np.mean(mean_sum_rate_policy_train_innersims[max(ep_start,t[i]-history):t[i]]))\r\n \r\n \r\n #plt.figure(figsize=(5,5))\r\n t=np.arange(0,total_samples,10)\r\n plt.plot(t, np.array(sum_rate_performance_wmmse)/float(N), label='WMMSE',linestyle=next(linecycler))\r\n plt.plot(t, np.array(sum_rate_performance_FP)/float(N), label='FP',linestyle=next(linecycler))\r\n plt.plot(t, np.array(sum_rate_performance_delayed_central)/float(N), label='FP w delay',linestyle=next(linecycler))\r\n plt.plot(t, np.array(sum_rate_performance_random)/float(N), label='random',linestyle=next(linecycler))\r\n plt.plot(t, np.array(sum_rate_performance_max)/float(N),'c', label='full-power',linestyle=next(linecycler))\r\n plt.plot(t, np.array(sum_rate_performance_policy)/float(N), label='matched policy',linestyle=next(linecycler))# with Multiplier '+str(power_multiplier_allsims[i]),linestyle=next(linecycler))\r\n \r\n plt.xlabel('training iterations')\r\n plt.ylabel('moving average spectral efficiency (bps/Hz) per link')\r\n plt.grid(True)\r\n plt.legend(loc=4)\r\n plt.tight_layout()\r\n plt.savefig('./fig/spectraleff_%s_network_%d'%(json_file,overal_sims)+'.pdf', format='pdf', dpi=1000)\r\n plt.savefig('./fig/spectraleff_%s_network_%d'%(json_file,overal_sims)+'.png', format='png', dpi=1000)\r\n plt.show(block=False)\r\n \r\n # Average performance of the last 200 training slots.\r\n history = 200\r\n print('Deployment: %s; policy: %s; K: %d; N: %d'%(json_file,json_file_policy,N,K))\r\n print('Averages for last %d episodes:'%(history))\r\n print('Sum rate per link - policy: %.2f'%(np.mean(mean_sum_rate_policy_train_innersims[total_samples-history:])/float(N)))\r\n print('Sum rate per link - WMMSE: %.2f'%(np.mean(mean_sum_rate_WMMSE[total_samples-history:])/float(N)))\r\n print('Sum rate 
per link - FP: %.2f'%(np.mean(mean_sum_rate_FP[total_samples-history:])/float(N)))\r\n print('Sum rate per link - FP w delay: %.2f'%(np.mean(mean_sum_rate_delayed_central[total_samples-history:])/float(N)))\r\n print('Sum rate per link - random: %.2f'%(np.mean(mean_sum_rate_random[total_samples-history:])/float(N)))\r\n print('Sum rate per link - full: %.2f'%(np.mean(mean_sum_rate_max[total_samples-history:])/float(N)))\r\n \r\n # Average time statistics\r\n print('Average time for a WMMSE run: %.2f ms'%(1000 * np.mean(mean_time_WMMSE)))\r\n print('Average time for an FP run: %.2f ms'%(1000 * np.mean(mean_time_FP)))\r\n print('Average time for a policy agent to determine its action: %.2f ms'%(1000 * np.mean(mean_time_calculating_strategy_takes)))\r\n print('Average time for a policy mini-batch train: %.2f ms'%(1000 * np.mean(mean_time_optimization_at_each_slot_takes)))\r\n print('Average WMMSE iterations per run: %.2f'%(np.mean(mean_iterations_WMMSE)))\r\n print('Average FP iterations per run: %.2f'%(np.mean(mean_iterations_FP)))\r\n \r\nif __name__ == \"__main__\": \r\n \r\n parser = argparse.ArgumentParser(description='give test scenarios.')\r\n parser.add_argument('--json-file', type=str, default='train_K5_N10_shadow10_episode2-5000_travel50000_vmax2_5',\r\n help='json file for the deployment the policies are tested on')\r\n parser.add_argument('--json-file-policy', type=str, default='ddpg200_100_50',\r\n help='json file for the hyperparameters')\r\n parser.add_argument('--num-sim', type=int, default=0,\r\n help='If set to -1, it uses num_simulations of the json file. If set to a non-negative id, it runs one simulation with the given id.')\r\n \r\n args = parser.parse_args()\r\n \r\n test_scenario = {'json_file':args.json_file,\r\n 'json_file_policy':args.json_file_policy,\r\n 'num_sim':args.num_sim}\r\n main(test_scenario)","repo_name":"sinannasir/Power-Control-asilomar","sub_path":"train_results.py","file_name":"train_results.py","file_ext":"py","file_size_in_byte":10277,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"37"} +{"seq_id":"39306445067","text":"import os\r\nimport random\r\nimport shutil\r\n\r\nfrom make_dataset import *\r\n\r\ndef train_test_split_frames(dataset_folder, labels_folder, image_folder, split):\r\n os.makedirs(dataset_folder, exist_ok=True)\r\n\r\n file_names = os.listdir(labels_folder)\r\n \r\n random.shuffle(file_names)\r\n split_point = int(split * len(file_names))\r\n\r\n train_test_labels = [file_names[:split_point], file_names[split_point:]]\r\n\r\n\r\n print(train_test_labels)\r\n label_type = {0:'train', 1:'test'}\r\n \r\n for i, labels in enumerate(train_test_labels):\r\n sub_dataset_folder = f'{dataset_folder}\\\\{label_type[i]}'\r\n print(i)\r\n make_dataset(labels, image_folder, sub_dataset_folder)\r\n","repo_name":"jacobmrivera/YOLO-Object-Detection-Project","sub_path":"pipeline/data_preprocessing/train_test_split_frames.py","file_name":"train_test_split_frames.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30149181881","text":"from collections import deque\n\n# read the board size, N x N\nn = int(input())\n\n# read the apple positions\napple_cnt = int(input())\napple = []\nfor _ in range(apple_cnt):\n x, y = map(int, input().split())\n apple.append((x-1,y-1))\n\n# read the list of turns\nmove_cnt = int(input())\nmove_list = deque()\nfor _ in range(move_cnt):\n x, y = input().split()\n move_list.append((int(x), y))\n\n# direction offset array: (row, col) deltas for U, R, D, L\n
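# worked example (added note): positions are (row, col) pairs with row 0 at the top, so starting\n# at (0, 0) and moving right ('R' -> index 1) gives (0 + 0, 0 + 1) == (0, 1), while moving up\n# ('U' -> index 0) would give (-1, 0), i.e. off the board.\n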
direction = [\n (-1, 0),\n (0, 1),\n (1, 0),\n (0, -1) \n]\n\n# convert a direction character to an index into the direction array\ndef dir_to_idx(dir):\n if dir == 'U':\n return 0\n elif dir == 'R':\n return 1\n elif dir == 'D':\n return 2\n elif dir == 'L':\n return 3\n\n# initialize the snake (head at (0, 0), heading right)\nsnake = deque([(0, 0)])\ndir = 'R'\n\n# rotate: 'L' turns left, 'D' turns right\ndef rotate(dir_info):\n global dir\n if dir == 'U':\n if dir_info == 'L':\n dir = 'L'\n elif dir_info == 'D':\n dir = 'R'\n elif dir == 'R':\n if dir_info == 'L':\n dir = 'U'\n elif dir_info == 'D':\n dir = 'D'\n elif dir == 'D':\n if dir_info == 'L':\n dir = 'R'\n elif dir_info == 'D':\n dir = 'L'\n elif dir == 'L':\n if dir_info == 'L':\n dir = 'D'\n elif dir_info == 'D':\n dir = 'U'\n\n# move one step\ndef move():\n x, y = snake[len(snake)-1] # coordinates of the snake's head\n idx = dir_to_idx(dir)\n\n dx = x + direction[idx][0]\n dy = y + direction[idx][1]\n\n print(\"next position : \", dx, dy)\n # check whether we leave the board\n if dx >= n or dy >= n or dx < 0 or dy < 0:\n print(\"out of bounds!!!!!!!!!\")\n return False\n\n # check whether the head hits the body\n if (dx, dy) in snake:\n print(\"the head hit the tail!!!!!\")\n return False\n\n snake.append((dx, dy))\n\n # check for an apple\n if (dx, dy) in apple:\n print(\"eat apple\")\n else :\n snake.popleft()\n return True\n\ndef solution():\n time = 0\n\n while True:\n time += 1\n print(time, \" ===========\", snake)\n \n if len(move_list) > 0:\n cur_move = move_list.popleft()\n\n if not move():\n return time\n\n if cur_move[0] == time:\n rotate(cur_move[1])\n else :\n move_list.appendleft(cur_move)\n\nprint(solution())\n\n\n\n'''\n6\n3\n3 4\n2 5\n5 3\n3\n3 D\n15 L\n17 D\n=> 9\n\n10\n4\n1 2\n1 3\n1 4\n1 5\n4\n8 D\n10 D\n11 D\n13 L\n=> 21\n\n10\n5\n1 5\n1 3\n1 2\n1 6\n1 7\n4\n8 D\n10 D\n11 D\n13 L\n=> 13\n'''","repo_name":"Imseungbae/algorithm","sub_path":"나동빈/유형별 기출문제풀이/11.뱀_2.py","file_name":"11.뱀_2.py","file_ext":"py","file_size_in_byte":2478,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6937602437","text":"#!/usr/bin/env python3\n\nimport shutil\nimport subprocess\nimport sys\n\nnpm = sys.argv[1]\npackage_json = sys.argv[2]\npkglock_json = sys.argv[3]\noutput_dir = sys.argv[4]\n\nshutil.copy(package_json, output_dir)\nshutil.copy(pkglock_json, output_dir)\nprocess = subprocess.Popen([npm, \"install\"],\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=output_dir)\n(stdout_data, stderr_data) = process.communicate()\nexit_code = process.returncode\nif exit_code != 0:\n sys.stderr.write(stdout_data)\nsys.exit(exit_code)\n","repo_name":"XLsn0w/Cydia","sub_path":"frida/frida-cycript/src/fetch-node-modules.py","file_name":"fetch-node-modules.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":916,"dataset":"github-code","pt":"37"} +{"seq_id":"7881942915","text":"print('*** For Loops ***')\n# * --- For Loops --- *\n\ncats = ['Maine Coon', 'Ragdoll', 'Norwegian Forest Cat']\nfor cat in cats: # name a variable to call every time it loops\n print(cat) # the item i want to print\n\"\"\" Print Output:\n Maine Coon\n Ragdoll\n Norwegian Forest Cat\n\"\"\"\n\nprint('')\n\nages = [12, 18, 24]\nfor age in ages:\n if age >= 18:\n print(f\"You're {age}-years-old? You are old enough\")\n else:\n print(f\"You're {age}-years-old? NOT old enough.\")\n\"\"\" Print Output:\n You're 12-years-old? NOT old enough.\n You're 18-years-old? You are old enough\n You're 24-years-old? You are old enough\n\"\"\"\nprint('')\n\n# * --- In Range --- *\n\nprint('In Range')\n# Print 1-10 with JS like for loop\n\"\"\"\nfor i in range(0, 10, 1): # ? 
Start at 1, end before 10, increment by 1\n print(i)\n\nprint('Above code is the same as below:')\n\"\"\"\nfor i in range(10):\n print(i) # ? will print a number per line\n\nprint('')\n\n# for each index of the list, print the cat\nfor i in range(len(cats)):\n print(\"Index:\", i) # Index; 0\n print(\"The cat per index:\", cats[i]) \n \"\"\" Print Output:\n # The cat per index: Maine Coon\n # The cat per index: Ragdoll\n # The cat per index: Norwegian Forest Cat\n \"\"\"\nprint('')\n\n# Add a \"!\" to every string in the list\nfor cat in cats:\n cat += \"!\"\n print(cat)\n \"\"\" Print Output:\n # Maine Coon!\n # Ragdoll!\n # Norwegian Forest Cat!\n \"\"\"\n\n# print index of every entry that begins with \"H\"\n# * Functions ask for parameters, but all they get is arguments\ndef printH( arr ): # get an array of Strings\n # ? Method 1\n # for i in range(len(arr)):\n # if arr[i][0] == 'H':\n # print(arr[i])\n # ? Method 2\n for text in arr:\n if text[0] == \"H\":\n print(text)\n\nprint('')\n\ndogs = ['Husky', 'Dalmation', 'Golden Retriever', 'Hound']\nprintH(dogs)\n\"\"\"Print Output:\nHusky\nHound\n\"\"\"\nprint('')\n\n\n# * Looping Over a Dictionary\n\ndanheng = {\n \"Path\": \"The Hunt\",\n \"Element\": \"Wind\",\n \"Factions\": [\"Astral Express\", \"The Nameless\"],\n \"Traces\": {\n \"Basic_ATK\": \"Cloudlancer Art: North Wind\",\n \"Skill\": \"Cloudlancer Art: Torrent\",\n \"Ultimate\": \"Ethereal Dream\",\n \"Talent\": \"Superiority of Reach\",\n \"Technique\": \"Splitting Spearhead\"\n }\n}\n\n# How do we loop over an object- ...er dictionary?\n\"\"\"\nfor x in danheng:\n print(x) # ? for looping in a dict -> gives back the key names\n\nOnce we find that out, Rename it.\n\"\"\"\n\"\"\"\nfor key in danheng:\n print(danheng[key]) # ? this will give back the value per each key it loops over\nLet's print the key with its corresponding value now.\n\"\"\"\nprint(\"Dan Heng's Details:\")\nfor key in danheng:\n print(f\"{key} : {danheng[key]}\")\n\nprint('')\n'''\nMake a function that creates an object/dictionary with the keys of name, age, and money with a default for age and money. This is similar to instantiating a class.\n- Array (Python) = List (JavaScript)\n- Dictionary (Python) = Object (JavaScript)\n'''\nhonkai_sr_mains = [\n {\n \"Name\" : \"Stelle\",\n \"Gender\" : \"Female\",\n \"Age\" : 22,\n \"Path\" : [\"Destruction\", \"Preservation\"],\n \"Element\" : [\"Physical\", \"Fire\"],\n \"Eidolons\" : [6, 2],\n },\n {\n \"Name\" : \"Dan Heng\",\n \"Gender\" : \"Male\",\n \"Age\" : 24,\n \"Path\" : [\"Hunt\", \"Destruction\"],\n \"Element\" : [\"Wind\", \"Imaginary\"],\n \"Eidolons\" : [2, 0],\n },\n {\n \"Name\" : \"March 7th\",\n \"Gender\" : \"Female\",\n \"Age\" : 22,\n \"Path\" : \"Preservation\",\n \"Element\" : \"Ice\",\n \"Eidolons\" : 4,\n }\n]\n\n# This function should search for a person by name, gender, or a min value of other parameters. 
Use default parameters so that the user can input only variables that they wish to search by.\n\ndef find_by_name( name ):\n characters = [] # prepare a new list we want to make\n for person in honkai_sr_mains:\n if person['Name'] == name:\n characters.append(person)\n return characters\n# print(find_by_name('Stelle'))\n\n","repo_name":"brittneyperez/Python_Jan2023","sub_path":"week_1-Intro_to_Python/D2-Functions_and_Loops/functions_and_loops.py","file_name":"functions_and_loops.py","file_ext":"py","file_size_in_byte":4035,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"23638343609","text":"# -*- coding: utf8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division, print_function, unicode_literals\n\nimport re\n\ntry:\n from contextlib import ignored\nexcept ImportError:\n from contextlib import contextmanager\n\n @contextmanager\n def ignored(*exceptions):\n try:\n yield\n except tuple(exceptions):\n pass\n\n\nMULTIPLE_WHITESPACE_PATTERN = re.compile(r\"\\s+\", re.UNICODE)\n\n\ndef is_blank(text):\n \"\"\"\n Returns ``True`` if string contains only whitespace characters\n or is empty. Otherwise ``False`` is returned.\n \"\"\"\n return not text or text.isspace()\n\n\ndef shrink_text(text):\n return normalize_whitespace(text.strip())\n\n\ndef normalize_whitespace(text):\n \"\"\"\n Translates multiple whitespace into single space character.\n If there is at least one new line character chunk is replaced\n by single LF (Unix new line) character.\n \"\"\"\n return MULTIPLE_WHITESPACE_PATTERN.sub(_replace_whitespace, text)\n\n\ndef _replace_whitespace(match):\n text = match.group()\n\n if \"\\n\" in text or \"\\r\" in text:\n return \"\\n\"\n else:\n return \" \"\n\n\ndef cached_property(getter):\n \"\"\"\n Decorator that converts a method into memoized property.\n The decorator works as expected only for classes with\n attribute '__dict__' and immutable properties.\n \"\"\"\n def decorator(self):\n key = \"_cached_property_\" + getter.__name__\n\n if not hasattr(self, key):\n setattr(self, key, getter(self))\n\n return getattr(self, key)\n\n decorator.__name__ = getter.__name__\n decorator.__module__ = getter.__module__\n decorator.__doc__ = getter.__doc__\n\n return property(decorator)\n","repo_name":"bookieio/breadability","sub_path":"breadability/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","stars":203,"dataset":"github-code","pt":"37"} +{"seq_id":"31597771157","text":"from pdf2docx import Converter, parse\n\n\nPDF_FILE = 'test.pdf'\nWORD_FILE = 'text.docx'\n\nconv = Converter(pdf_file=PDF_FILE)\nconv.convert(docx_filename=WORD_FILE, start=0,end=None)\nconv.close()\n\nparse(pdf_file=PDF_FILE,docx_file=WORD_FILE,start=0,end=None)\n","repo_name":"uniqlydev/EZFILLUP","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18433397975","text":"import sys\nimport dill\n\n# note: the version in my notebook uses movie name, not ID\ndef average_similarity(movie_id_1, movie_id_2, svd, n=5):\n NUM_MOVIES = 3883\n \n if movie_id_1 == movie_id_2:\n similar = svd.similar(movie_id_1, n=NUM_MOVIES)[1:]\n return [lookup_name(movie[0]) for movie in similar][:n+1]\n try:\n similar_1 = svd.similar(movie_id_1, n=NUM_MOVIES)[1:]\n similar_2 = svd.similar(movie_id_2, n=NUM_MOVIES)[1:]\n except:\n return 
[\"Error in computing average similarity.\"]\n \n # construct dictionary of average ratings\n norm_1 = similar_1[0][1]\n norm_2 = similar_2[0][1]\n average = {}\n similar_1 = dict(similar_1)\n similar_2 = dict(similar_2)\n # delete the original movies themselves from each other dictionary\n similar_1.pop(movie_id_2)\n similar_2.pop(movie_id_1)\n \n for movie_id, rating in similar_1.items():\n average[movie_id] = (rating/norm_1 + similar_2[movie_id]/norm_2)/2\n\n # return names of the highest ranking movies\n top_n = sorted(average.items(), key=lambda x: x[1], reverse=True)[:n+1]\n d = dill.load(open(\"static/d.dill\"))\n return [d[movie[0]].title() for movie in top_n]\n","repo_name":"pbaranay/date-night","sub_path":"AverageSimilarity.py","file_name":"AverageSimilarity.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39221653419","text":"# -*- coding: utf-8 -*-\r\nimport pandas as pd\r\nfrom collections import Counter\r\n\r\ndef cut_sentence(talk_content):\r\n talk_sentences = []\r\n talk_words = talk_content.split()\r\n last_sentence_idx = 0\r\n exception_rule = [\"Mr\", \"Mrs\", \"Miss\", \"Ms\", \"Sir\", \"Madam\", \"Dr\", \"Cllr\", \"Lady\", \"Lord\", \"Professor\", \"Prof\",\r\n \"Chancellor\", \"Principal\", \"President\", \"Master\", \"Governer\", \"Gov\", \"Attorney\", \"Atty\"]\r\n for w_i in range(len(talk_words)):\r\n talk_word = talk_words[w_i]\r\n if w_i == len(talk_words) - 1:\r\n talk_sentences.append(\" \".join(talk_words[last_sentence_idx: w_i + 1]))\r\n else:\r\n if talk_word[-1] in [\".\", \"?\", \"!\"]:\r\n if talk_word[:-1] not in exception_rule:\r\n if talk_words[w_i + 1][0].isupper():\r\n talk_sentences.append(\" \".join(talk_words[last_sentence_idx: w_i + 1]))\r\n last_sentence_idx = w_i + 1\r\n return talk_sentences\r\n\r\ndef find_first_word_index(phrases: list, sentence: list):\r\n first_words_index_dict = {}\r\n for phrase in phrases:\r\n first_word_star = phrase[0]\r\n if first_word_star not in first_words_index_dict.keys():\r\n first_word_index = []\r\n if '*' in first_word_star:\r\n first_word = first_word_star.split('*')[0]\r\n for w_i in range(len(sentence)):\r\n w = sentence[w_i]\r\n if w[:len(first_word)] == first_word:\r\n first_word_index.append(w_i)\r\n else:\r\n first_word = first_word_star\r\n for w_i in range(len(sentence)):\r\n w = sentence[w_i]\r\n if w == first_word:\r\n first_word_index.append(w_i)\r\n first_words_index_dict[first_word_star] = first_word_index\r\n return first_words_index_dict\r\n\r\ndef phrase_in_sentence(phrase: list, first_word_index: list, sentence: list):\r\n for f_idx in first_word_index:\r\n flag = True\r\n for kw_i in range(len(phrase)):\r\n if f_idx + kw_i >= len(sentence):\r\n return False\r\n kw = phrase[kw_i]\r\n if '*' in kw:\r\n kw = kw.split('*')[0]\r\n \r\n if sentence[f_idx+kw_i][:len(kw)] != kw:\r\n flag = False\r\n break\r\n else:\r\n if sentence[f_idx+kw_i][:len(kw)] != kw:\r\n flag = False\r\n break\r\n \r\n if flag:\r\n return True\r\n return False\r\n\r\ndef count_words_in_text(words_list, text_words):\r\n dict_count = 0\r\n words_counter = dict(Counter(text_words))\r\n counter_keys = list(words_counter.keys())\r\n \r\n for words in words_list:\r\n if len(words) == 1:\r\n kw = words[0] \r\n if '*' not in kw:\r\n if words[0] in words_counter.keys():\r\n dict_count += words_counter[words[0]]\r\n else:\r\n kw = kw.split(\"*\")[0]\r\n for key in counter_keys:\r\n if key[:len(kw)] == kw:\r\n dict_count += 
words_counter[key]\r\n else:\r\n for w_i in range(len(text_words)):\r\n flag = True\r\n for p_w_i, p_w in enumerate(words):\r\n if '*' not in p_w:\r\n if w_i + p_w_i >= len(text_words) or p_w != text_words[w_i + p_w_i]:\r\n flag = False\r\n break\r\n else:\r\n p_w = p_w.split('*')[0]\r\n if w_i + p_w_i >= len(text_words) or p_w != text_words[w_i + p_w_i][:len(p_w)]:\r\n flag = False\r\n break\r\n if flag:\r\n dict_count += 1\r\n return dict_count\r\n\r\ndef count_sentences_in_text(words_list, text_sentences: list):\r\n dict_count = 0\r\n symbols = [\",\", \".\", \"!\", \"?\"] \r\n for sent in text_sentences:\r\n for symbol in symbols:\r\n sent = sent.replace(symbol, \"\").lower()\r\n sentence = [w.lower() for w in sent.split()]\r\n first_words_index_dict = find_first_word_index(phrases=words_list, sentence=sentence)\r\n for w in words_list:\r\n if phrase_in_sentence(phrase=w, first_word_index=first_words_index_dict[w[0]], sentence=sentence):\r\n dict_count += 1\r\n break\r\n return dict_count\r\n\r\n\r\nclass RussiaCounter_ExactWord:\r\n def __init__(self):\r\n \"\"\"\r\n whole words, whole sentences, three dicts\r\n \"\"\"\r\n self.dummy_dict = [\"russia\", \"ukraine\", \"war\", \"russian\", \"ukrainian\"]\r\n self.words_freq_dict = {}\r\n rus_list = list(pd.read_excel(\"rus_dict_exact.xlsx\", engine=\"openpyxl\", header=None)[0])\r\n rus_name_list = list(pd.read_excel(\"rus_names.xlsx\", engine=\"openpyxl\", header=None)[1])\r\n rus_list = [w.strip().lower().split() if not w.isupper() else w.strip().split() for w in rus_list]\r\n rus_name_list = [w.strip().lower().split() if not w.isupper() else w.strip().split() for w in rus_name_list]\r\n self.words_freq_dict[\"rus_name\"] = rus_name_list\r\n self.words_freq_dict[\"rus\"] = rus_list\r\n\r\n def summary_one_dict(self, dict_name, text_words: list, text_sentences: list):\r\n # dict\r\n if dict_name == \"dummy\":\r\n dummy_count = 0\r\n for w in self.dummy_dict:\r\n if w in text_words:\r\n dummy_count += 1\r\n return 1 if dummy_count >= 2 else 0\r\n elif dict_name == \"rus\":\r\n word_dict = self.words_freq_dict[\"rus\"]\r\n else:\r\n word_dict = self.words_freq_dict[\"rus_name\"]\r\n # word\r\n word_count = count_words_in_text(words_list=word_dict, text_words=text_words)\r\n # sentence\r\n sent_count = count_sentences_in_text(words_list=word_dict, text_sentences=text_sentences)\r\n return word_count, sent_count\r\n\r\n def count_by_dict(self, text):\r\n text_sentences = cut_sentence(text)\r\n symbols = [\",\", \".\", \"!\", \"?\"] # 标点符号删去\r\n for symbol in symbols:\r\n text = text.replace(symbol, \"\").lower()\r\n text_words = text.split()\r\n\r\n dummy_var = self.summary_one_dict(dict_name=\"dummy\", text_words=text_words, text_sentences=text_sentences)\r\n rus_w_count, rus_s_count = self.summary_one_dict(dict_name=\"rus\", text_words=text_words, text_sentences=text_sentences)\r\n rus_name_w_count, rus_name_s_count = self.summary_one_dict(dict_name=\"rus_name\", text_words=text_words, text_sentences=text_sentences)\r\n\r\n return dummy_var, rus_w_count, rus_s_count, rus_name_w_count, rus_name_s_count, len(text_words), len(text_sentences)\r\n\r\nclass RussiaCounter_Lemma:\r\n def __init__(self):\r\n \"\"\"\r\n whole words, whole sentences, three dicts\r\n \"\"\"\r\n self.dummy_dict = ['russia', \"russia's\", 'russian', 'russians', \"russian's\", \"russians'\",\r\n 'ukraine', \"ukraine's\", 'ukrainian', 'ukrainians', \"ukrainian's\", \"ukrainians'\",\r\n 'war', 'wars']\r\n \r\n self.words_freq_dict = {}\r\n rus_list = 
list(pd.read_excel(\"rus_dict_lemma.xlsx\", engine=\"openpyxl\", header=None)[0])\r\n rus_name_list = list(pd.read_excel(\"rus_names.xlsx\", engine=\"openpyxl\", header=None)[1])\r\n rus_list = [w.strip().lower().split() if not w.isupper() else w.strip().split() for w in rus_list]\r\n rus_name_list = [w.strip().lower().split() if not w.isupper() else w.strip().split() for w in rus_name_list]\r\n self.words_freq_dict[\"rus_name\"] = rus_name_list\r\n self.words_freq_dict[\"rus\"] = rus_list\r\n \r\n def summary_one_dict(self, dict_name, text_words: list, text_sentences: list):\r\n # dict\r\n if dict_name == \"dummy\":\r\n dummy_count = 0\r\n for w in self.dummy_dict:\r\n if w in text_words:\r\n dummy_count += 1\r\n return 1 if dummy_count >= 2 else 0\r\n elif dict_name == \"rus\":\r\n word_dict = self.words_freq_dict[\"rus\"]\r\n else:\r\n word_dict = self.words_freq_dict[\"rus_name\"]\r\n # word\r\n word_count = count_words_in_text(words_list=word_dict, text_words=text_words)\r\n # sentence\r\n sent_count = count_sentences_in_text(words_list=word_dict, text_sentences=text_sentences)\r\n return word_count, sent_count\r\n\r\n def count_by_dict(self, text):\r\n text_sentences = cut_sentence(text)\r\n symbols = [\",\", \".\", \"!\", \"?\"] # 标点符号删去\r\n for symbol in symbols:\r\n text = text.replace(symbol, \"\").lower()\r\n text_words = text.split()\r\n\r\n dummy_var = self.summary_one_dict(dict_name=\"dummy\", text_words=text_words, text_sentences=text_sentences)\r\n rus_w_count, rus_s_count = self.summary_one_dict(dict_name=\"rus\", text_words=text_words, text_sentences=text_sentences)\r\n rus_name_w_count, rus_name_s_count = self.summary_one_dict(dict_name=\"rus_name\", text_words=text_words, text_sentences=text_sentences)\r\n\r\n return dummy_var, rus_w_count, rus_s_count, rus_name_w_count, rus_name_s_count, len(text_words), len(text_sentences)\r\n\r\n ","repo_name":"GotoRyusuke/EDGAR","sub_path":"Counters/russia_counters.py","file_name":"russia_counters.py","file_ext":"py","file_size_in_byte":9352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71150323949","text":"from calDQ import DQ_VaR, DQ_ES\nfrom optDQ import opt_DQ_VaR, opt_DQ_ES\nfrom dataLoader import dataLoader\n\nimport numpy as np \nimport pandas as pd\n\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\n\npath = \"./data/\"\n\nalpha = 0.05\n\nn_training = 500\n\nloss_ratio = dataLoader(path, start_date=\"2011-01-01\")\n\nn_stock = loss_ratio.shape[1] # number of stocks = number of rows \n\nstart_year, start_month = 2014, 1\nend_year, end_month = 2021, 12\n\ninitial_value = 1000\nopt_w_DQVaR= {}\nopt_w_DQES= {}\nw = np.ones(n_stock) / n_stock\n\nfor year in range(start_year, end_year + 1):\n\n for month in range(1, 13):\n\n current_date = str(year) + \"-\" + str(month).zfill(2) + \"-00\"\n\n training_data = loss_ratio.loc[:current_date][-n_training:]\n\n # calculate the optimal investment weight that minimizes the DQ_VaR\n w, _ = opt_DQ_VaR(alpha, training_data.values, tie_breaker=True, w_0=w)\n \n opt_w_DQVaR[loss_ratio.loc[current_date:].index[0]] = w\n\n v, _ = opt_DQ_ES(alpha, training_data.values, tie_breaker=True, w_0=w)\n\n opt_w_DQES[loss_ratio.loc[current_date:].index[0]] = v\n\n\n# put optimal weight into a dataframe\nopt_w_DQVaR = pd.DataFrame(opt_w_DQVaR, index=loss_ratio.columns).T\n\nopt_w_DQES = pd.DataFrame(opt_w_DQES, index=loss_ratio.columns).T\n\nloss_ratio.index = pd.to_datetime(loss_ratio.index)\n\n# calculate the monthly 
return \nreturn_M = (1-loss_ratio).resample(\"M\").prod().loc[\"2014-01-01\":]\n\nreturn_M.index = opt_w_DQVaR.index\n\nportfolio_value_DQVaR = (return_M * opt_w_DQVaR).sum(axis=1).cumprod(axis=0) * initial_value\n\nportfolio_value_DQVaR.index = pd.to_datetime(portfolio_value_DQVaR.index)\n\nportfolio_value_DQES = (return_M * opt_w_DQES).sum(axis=1).cumprod(axis=0) * initial_value\n\nportfolio_value_DQES.index = pd.to_datetime(portfolio_value_DQES.index)\n\n\n\nplt.figure(figsize=(10, 6))\n\nplt.plot(portfolio_value_DQVaR, label=\"DQ_VaR\")\nplt.plot(portfolio_value_DQES, label=\"DQ_ES\")\n\nplt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y'))\n\nplt.gca().xaxis.set_major_locator(mdates.YearLocator())\n\nplt.xlabel(\"Date\")\nplt.ylabel(\"Portfolio Value\")\n\nplt.legend()\n\nplt.savefig(\"./output/DQ.png\")\n\n\n","repo_name":"Liyuan-Lin/DQ","sub_path":"python/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4222878490","text":"#compute.py\nfrom utilsovs.uniprot import request_uniprotKB_API, get_sprot\nfrom utilsovs.globals import PROTEASES, MONOISOTOPIC_AVERAGE_MASS, COLORSCHEME\nfrom utilsovs.commons import init_dict_aa, write_pickle\nfrom utilsovs.prepare import get_aaSprot_freqTable\nimport math\nimport copy\nimport gzip\nimport re\n\ndef compute_combFrags(data):\n data.data = [ data.data[a:b] for a in range(len(data.data)+1) for b in range(len(data.data)+1) if a < b ]\n data.data = [ [ ''.join([ x[0] for x in comb ]), (comb[0][1][0],comb[-1][1][1]) ] for comb in data.data ]\n return data\n\ndef compute_splitSeq(data):\n\n lFrags = re.split(PROTEASES[data.protease][1],data.sequence)\n\n lIdx = [m.end() for m in re.finditer(PROTEASES[data.protease][1],data.sequence)]\n\n if len(lFrags) > len(lIdx):\n if len(lFrags) != 1:\n lIdx.append(lIdx[-1])\n else:\n lIdx.append(len(lFrags[0]))\n\n loFrags = []\n\n for i, v in enumerate(lFrags):\n if i == 0 and len(v) > 0:\n loFrags.append([v,(1,lIdx[i])])\n elif i == len(lFrags)-1 and len(v) > 0:\n loFrags.append([v,(lIdx[i]+1,len(data.sequence))])\n elif len(v) > 0:\n loFrags.append([v,(lIdx[i-1]+1,lIdx[i])])\n\n data.data = copy.deepcopy(loFrags)\n\n return data\n\ndef compute_mwFrags(data):\n data.input = copy.deepcopy(data.data)\n data.data = []\n for i in range(len(data.input)):\n wm = MONOISOTOPIC_AVERAGE_MASS['water'][0]\n wa = MONOISOTOPIC_AVERAGE_MASS['water'][0]\n for aa in data.input[i][0]:\n wm = wm + MONOISOTOPIC_AVERAGE_MASS[aa][0]\n wa = wa + MONOISOTOPIC_AVERAGE_MASS[aa][1]\n data.data.append([data.input[i][0],(data.input[i][1][0],data.input[i][1][1]),wm,wa])\n return data\n\ndef compute_mw(string):\n\n wm = MONOISOTOPIC_AVERAGE_MASS['water'][0]\n wa = MONOISOTOPIC_AVERAGE_MASS['water'][0]\n\n for aa in string:\n wm = wm + MONOISOTOPIC_AVERAGE_MASS[aa][0]\n wa = wa + MONOISOTOPIC_AVERAGE_MASS[aa][1]\n\n return string, wm, wa\n\ndef get_one_match(data):\n data.data = data.id+' '+data.aa+' '+str(data.pos)+' '+data.sequence[data.pos-1]\n\n if data.aa == data.sequence[data.pos-1]:\n data.data = data.data+' MATCH'\n else:\n data.data = data.data+' MISMATCH'\n return data\n\ndef compute_relFreq_seqAlign(data):\n\n data.data = []\n\n data.lenSeqs = len(data.input[0])\n\n for i in range(data.lenSeqs):\n data.data.append({})\n\n for aa in COLORSCHEME.keys():\n data.data[i][aa] = 0\n\n for i in range(data.lenSeqs):\n\n for seq in data.input:\n data.data[i][seq[i]] += 1\n\n for i in 
range(data.lenSeqs):\n\n for aa in data.data[i].keys():\n data.data[i][aa] = data.data[i][aa] / sum(list(data.data[i].values()))\n\n for i in range(data.lenSeqs):\n\n for aa in data.data[i].keys():\n try:\n data.data[i][aa] = math.log(data.data[i][aa] / data.aaSprot_freqTable[aa],2)\n except (KeyError, ValueError, ZeroDivisionError):\n # missing residue, zero background frequency or log(0): treat as no enrichment\n data.data[i][aa] = 0\n\n return data\n","repo_name":"Synthaze/utilsovs","sub_path":"src/utilsovs/compute.py","file_name":"compute.py","file_ext":"py","file_size_in_byte":3054,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"20223948124","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\nimport os\nimport binascii\nimport ecdsa\nimport hashlib\nimport itertools\nfrom hashlib import sha256\nimport time\nimport socket\nimport struct\n\n#Example: is G = (x, y) on the curve SECP256k1 (elliptic curve y^2 = x^3 + 7 over the field F_p)?\np = 115792089237316195423570985008687907853269984665640564039457584007908834671663\nx = 55066263022277343669578718895168534326250603453777594175500187360389116729240\ny = 32670510020758816978083085130507043184471273380659243275938904335757337482424\n(x**3 + 7) % p == y**2 % p # True => G is on the curve\n\n#Elliptic curve\ndef main():\n a = -1\n b = 1\n y, x = np.ogrid[-5:5:100j, -5:5:100j]\n plt.contour(x.ravel(), \n y.ravel(), pow(y, 2) - pow(x, 3) - x * a - b, [0])\n plt.grid()\n plt.show()\n \nmain()\n\n#Generate private key\nprivate_key = ''.join(['%x' % random.randrange(16) for x in range(0, 64)])\nprivate_key\nlen(private_key)\n\n#or\nprivate_key = os.urandom(32).hex()\nprivate_key\nlen(private_key)\n\n#ECDSA (Elliptic Curve Digital Signature Algorithm)\nprivate_key = ecdsa.SigningKey.generate(curve = ecdsa.SECP256k1)\npublic_key = private_key.get_verifying_key()\nbinascii.hexlify(public_key.to_string()).decode('ascii').upper()\n\nlen(binascii.hexlify(public_key.to_string()).decode('ascii').upper())\n\nlen(binascii.hexlify(public_key.to_string()).decode('ascii').upper() + \\\n binascii.hexlify(public_key.to_string()).decode('ascii').upper()\n)\n\n# The point of Base58Check encoding is to write a byte sequence as compactly as possible\n# in a human-readable format while making the probability of possible typos even smaller\n\n#// Why base-58 instead of standard base-64 encoding?\n#// - Don't want 0OIl characters that look the same in some fonts and\n#// could be used to create visually identical looking account numbers.\n#// - A string with non-alphanumeric characters is not as easily accepted as an account number.\n#// - E-mail usually won't line-break if there's no punctuation to break at.\n#// - Doubleclicking selects the whole number as one word if it's all alphanumeric.\n## https://github.com/bitcoin/bitcoin/blob/master/src/base58.h\n\n#Base58Check encoding\nb58 = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'\nlen(b58)\n\ndef base58encode(n):\n result = ''\n while n > 0:\n result = b58[int(n % 58)] + result\n n //= 58\n return result\n\n# print \"Base58 encode for '123123':\", base58encode(123123)\n# # Base58 encode for '123123': dbp\nbase58encode(123123)\n\n# Will be used to decode raw bytes and then encode them to the base58\ndef base256decode(s) :\n result = 0\n for c in s :\n result = result * 256 + ord(chr(c))\n return result\n\ndef countLeadingZeroes(s) :\n count = 0\n for c in s:\n if c == '\\0':\n count += 1\n else:\n break\n return count\n\n
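# Added summary (editorial note): base58CheckEncode below composes the helpers above:\n# take chr(prefix) + payload, append the first 4 bytes of SHA256(SHA256(data)) as a\n# checksum, then base58-encode the whole thing, writing leading zero bytes as '1'.\n# Note that this toy version hashes the hexlified string rather than the raw bytes,\n# so its output will not match real Bitcoin tools byte-for-byte.\n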
#Case\nprivate_key = '0a56184c7a383d8bcce0c78e6e7a4b4b161b2f80a126caa48bde823a4625521f'\n\n# WIF (Wallet Import Format) is built very simply:\n# take a private key, e.g. 0C28FCA386C7A227600B2FE50B7CAE11EC86D3BF1FBE471BE89827E19D72AA1D,\n# and write it in Base58Check with the prefix 0x80. That is all.\ndef privateKeyToWif(key_hex):\n    # the 32 raw key bytes go into Base58Check, not the hex string itself\n    return base58CheckEncode(0x80, bytes.fromhex(key_hex))\n\nprint(\"Private key in WIF format:\", privateKeyToWif(private_key))\n\n# A public key is simply a point on the SECP256k1 curve. The first and most\n# common way to write it down is the uncompressed format: 32 bytes for each of\n# the X and Y coordinates. To avoid confusion, the prefix 0x04 is used, for a\n# total of 65 bytes.\n\ndef privateKeyToPublicKey(s):\n    sk = ecdsa.SigningKey.from_string(bytes.fromhex(s), \n                                      curve = ecdsa.SECP256k1)\n    # uncompressed SEC format: a 0x04 prefix byte followed by raw X and Y\n    return binascii.hexlify(b'\\x04' + sk.verifying_key.to_string()).decode('ascii')\nuncompressed_public_key = privateKeyToPublicKey(private_key)\n\nprint(\"Uncompressed public key: {}, size: {}\".format(uncompressed_public_key, \n                                                     len(uncompressed_public_key) // 2))\n\n# The second format is called compressed. The idea is this: a public key is a\n# point on the curve, i.e. a pair of numbers satisfying y^2 = x^3 + ax + b (mod p).\n# So we can store only the X coordinate, and whenever we need the Y coordinate\n# we simply solve the equation. That shrinks the public key by almost 50%!\n# The only subtlety: for a given X coordinate there are obviously two solutions\n# of the equation. Normally we would just store the sign of the Y coordinate,\n# but for a curve over a finite field we use the following property: if\n# solutions exist for an X coordinate, one of the two points has an even Y\n# coordinate and the other an odd one (again, you can verify this yourself).\n# In the first case the prefix 0x02 is used, in the second 0x03.\n\n
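# Editor's sketch (not in the original script): compressing and decompressing a\n# key as described above. Decompression solves y^2 = x^3 + 7 (mod p); because\n# the SECP256k1 prime satisfies p % 4 == 3, one square root of y2 is\n# pow(y2, (p + 1) // 4, p), and the 0x02/0x03 prefix picks the right parity.\ndef compressPublicKey(uncompressed_hex):\n    raw = bytes.fromhex(uncompressed_hex)  # 0x04 + 32-byte X + 32-byte Y\n    prefix = b'\\x02' if raw[64] % 2 == 0 else b'\\x03'  # parity of Y's last byte\n    return binascii.hexlify(prefix + raw[1:33]).decode('ascii')\n\ndef decompressPublicKey(compressed_hex):\n    raw = bytes.fromhex(compressed_hex)\n    x_c = int.from_bytes(raw[1:33], 'big')\n    y2 = (pow(x_c, 3, p) + 7) % p  # p is the SECP256k1 prime defined above\n    y_c = pow(y2, (p + 1) // 4, p)\n    if y_c % 2 != raw[0] % 2:  # wrong parity, take the other root p - y\n        y_c = p - y_c\n    return binascii.hexlify(b'\\x04' + raw[1:33] + y_c.to_bytes(32, 'big')).decode('ascii')\n\nprint(\"Compressed public key:\", compressPublicKey(uncompressed_public_key))\n\n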
# The address is obtained from the public key in a deterministic way. Moreover,\n# the reverse operation is impossible, because cryptographically strong hash\n# functions are used: RIPEMD160 and SHA256. The algorithm that turns a public\n# key into an address:\n# 1) Take a private key, e.g. 45b0c38fa54766354cf3409d38b873255dfa9ed3407a542ba48eb9cab9dfca67\n# 2) Derive the uncompressed public key from it, in this case 04162ebcd38c90b56fbdb4b0390695afb471c944a6003cb334bbf030a89c42b584f089012beb4842483692bdff9fcab8676fed42c47bffb081001209079bbcb8db\n# 3) Compute RIPEMD160(SHA256(public_key)), which gives 5879DB1D96FC29B2A6BDC593E67EDD2C5876F64C\n# 4) Encode the result in Base58Check with the prefix 0x00, giving 17JdJpDyu3tB5GD3jwZP784W5KbRdfb84X.\n# That is the address.\n\ndef pubKeyToAddr(s):\n    # hash the raw public-key bytes, not their hex representation\n    ripemd160 = hashlib.new('ripemd160')\n    ripemd160.update(hashlib.sha256(bytes.fromhex(s)).digest())\n    return base58CheckEncode(0, ripemd160.digest())\n\ndef keyToAddr(s):\n    return pubKeyToAddr(privateKeyToPublicKey(s))\n\nprint(keyToAddr(\"45b0c38fa54766354cf3409d38b873255dfa9ed3407a542ba48eb9cab9dfca67\"))\n\n","repo_name":"hdrbv/blockchain_btc","sub_path":"Cryptography.py","file_name":"Cryptography.py","file_ext":"py","file_size_in_byte":7720,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32078127240","text":"import math\nimport numpy as np\n\n# EDGE_POCKET_WIDTH_MM, CORNER_POCKET_WIDTH_MM, BUMPER_WIDTH_MM, BUMPER_HEIGHT_MM,\n# BALL_RADIUS_MM, POCKET_EDGE_DEPTH_MM and BumperSegment are assumed to be\n# defined elsewhere in the package, and self.wall_segments,\n# self.wall_segment_normals, self.pocket_openings and self.bumpers to be lists.\n\ndef build_wall_segments(self):\n    half_side_pocket_width = EDGE_POCKET_WIDTH_MM * 0.5 - BALL_RADIUS_MM\n    corner_pocket_offset = math.sqrt((CORNER_POCKET_WIDTH_MM - BALL_RADIUS_MM) ** 2 / 2)\n    half_width = BUMPER_WIDTH_MM * 0.5\n    half_height = BUMPER_HEIGHT_MM * 0.5\n\n    # one sign pair per quadrant of the table\n    signs = [(1, 1), (-1, 1), (-1, -1), (1, -1)]\n    down = np.array((0, -1))  # inward normal of the top rail\n    left = np.array((-1, 0))\n    diag_down = np.array((1, -1))\n    diag_up = np.array((-1, 1))\n    diag_down = diag_down / np.linalg.norm(diag_down)\n    diag_up = diag_up / np.linalg.norm(diag_up)\n\n    for s in signs:\n        sv = np.array(s)  # do not shadow the signs list being iterated\n        p1 = np.array((half_side_pocket_width, half_height - BALL_RADIUS_MM)) * sv\n        p2 = np.array(((half_width - corner_pocket_offset), half_height - BALL_RADIUS_MM)) * sv\n        p3 = p1 + np.array((0, 1)) * POCKET_EDGE_DEPTH_MM * sv\n        corner_hole_edge = (np.array((1, 1)) / np.linalg.norm(np.array((1, 1)))) * POCKET_EDGE_DEPTH_MM * sv\n        p4 = p2 + corner_hole_edge\n        p5 = np.array((half_width - BALL_RADIUS_MM, half_height - corner_pocket_offset)) * sv\n        p6 = p5 + corner_hole_edge\n        self.wall_segments.append((p1, p2))\n        self.wall_segments.append((p1, p3))\n        self.wall_segments.append((p2, p4))\n        self.wall_segments.append((p5, p6))\n\n        self.wall_segment_normals.append(down * sv)\n        self.wall_segment_normals.append(left * sv)\n        self.wall_segment_normals.append(diag_down * sv)\n        self.wall_segment_normals.append(diag_up * sv)\n\n    # side-pocket openings in the middle of the top and bottom rails\n    p1 = np.array((half_side_pocket_width, half_height - BALL_RADIUS_MM))\n    p2 = np.array((-half_side_pocket_width, half_height - BALL_RADIUS_MM))\n    self.pocket_openings.append((p1, p2))\n    sign = np.array((1, -1))\n    self.pocket_openings.append((p1 * sign, p2 * sign))\n\n    # straight sections of the right and left rails between the corner pockets\n    p1 = np.array((half_width - BALL_RADIUS_MM, half_height - corner_pocket_offset))\n    p2 = np.array((half_width - BALL_RADIUS_MM, -half_height + corner_pocket_offset))\n    p3 = np.array((-half_width + BALL_RADIUS_MM, half_height - corner_pocket_offset))\n    p4 = np.array((-half_width + BALL_RADIUS_MM, -half_height + corner_pocket_offset))\n    self.wall_segments.extend([(p1, p2), (p4, p3)]) # clockwise winding\n    self.wall_segment_normals.extend([left, -left])\n\n    for idx in range(len(self.wall_segments)):\n        normal = self.wall_segment_normals[idx]\n        seg = self.wall_segments[idx]\n        if np.cross(seg[1] - seg[0], normal) < 0:\n            # winding is backwards relative to the normal, swap the endpoints\n            seg = (seg[1], seg[0])\n            self.wall_segments[idx] = seg\n        bumper = BumperSegment(seg[0], seg[1])\n        self.bumpers.append(bumper)","repo_name":"StuartsHome/maths_physics","sub_path":"physics.py","file_name":"physics.py","file_ext":"py","file_size_in_byte":2687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18255624015","text":"import tensorflow as tf\nimport numpy as np\n\nprint(tf.__version__)\n\n
scalar = tf.constant(7)\nprint(scalar.numpy())\n\nvector = tf.constant([10, 10])\nprint(vector.ndim)\n\nmatrix = tf.constant([[10, 7],\n                      [3, 4],\n                      [5, 0]], dtype=tf.float16)\n\n# Mutable vs. immutable tensors\nchangeable_tensor = tf.Variable([10, 7])\nprint(changeable_tensor)\n\nchangeable_tensor[0].assign(8)\nprint(changeable_tensor)\n\n# Randomly generated tensors\nrandom_1 = tf.random.Generator.from_seed(42)\nrandom_1 = random_1.normal(shape=(3, 2))\n\ntf.random.set_seed(42)\n\n# Shuffle\nprint(matrix)\nmatrix = tf.random.shuffle(matrix)  # an operation-level seed can also be set, e.g. tf.random.shuffle(matrix, seed=12)\nprint(matrix)\n\n# Other ways to create tensors\ntf.ones(shape=(3, 2))\ntf.zeros(shape=(4, 5))\n\n# Create a tensor from a NumPy array; convert a tensor back to NumPy\nnumpy_A = np.arange(1, 25, dtype=np.int32)\nA = tf.constant(numpy_A, shape=[2, 3, 4])\nprint(A)\n\nprint(A.numpy())\n\n# Get tensor attributes\nprint(A.shape, A.ndim, tf.size(A))\n\n# Add a dimension\nB = A[..., tf.newaxis]  # or equivalently\nB = tf.expand_dims(A, axis=-1)\n\n# Reshape and transpose\ntf.reshape(A, shape=(8, 3))\ntf.transpose(A)\n\n# Matrix multiplication\nmatrix_1 = tf.constant([[1, 3, 4],\n                        [3, 5, 6],\n                        [10, 8, 8]])\nmatrix_2 = tf.constant([[1, 2],\n                        [4, 7],\n                        [9, 1]])\nprint(tf.matmul(matrix_1, matrix_2))  # or equivalently\nprint(matrix_1 @ matrix_2)\n\n# Min, max, mean, sum, absolute value, std, variance, argmax, argmin\ntf.reduce_min(A)\ntf.reduce_max(A)\ntf.reduce_mean(A)\ntf.reduce_sum(A)\ntf.abs(A)\nA_float = tf.cast(A, dtype=tf.float32)  # std/variance/sqrt/log require a float dtype\ntf.math.reduce_std(A_float)\ntf.math.reduce_variance(A_float)\ntf.argmax(A)\ntf.argmin(A)\n\n# Remove size-1 dimensions\ntf.squeeze(B)\n\n# One-hot encoding (indices >= depth produce all-zero rows)\ntf.one_hot(A, depth=10)\ntf.one_hot(A, depth=10, on_value=\"True\", off_value=\"False\")\n\n# Square, square root, logarithm\ntf.square(A)\ntf.sqrt(A_float)\ntf.math.log(A_float)\n","repo_name":"EricBCN/tensorflow_course","sub_path":"00_getting_started_with_tensorflow.py","file_name":"00_getting_started_with_tensorflow.py","file_ext":"py","file_size_in_byte":1973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}