diff --git "a/5898.jsonl" "b/5898.jsonl"
new file mode 100644
--- /dev/null
+++ "b/5898.jsonl"
@@ -0,0 +1,69 @@
+{"seq_id":"37099813005","text":"import random\n\ndef main_5_11():\n    num1 = random.randint(1, 999)\n    num2 = random.randint(1, 999)\n    print(num1)\n    print('+', num2, sep=' ')\n    result(num1 + num2)\n\ndef result(summ):\n    res_user = float(input('Введите ваш ответ -'))\n    if summ == res_user:\n        print('\\nВы правильно ответили')\n        print(\"Поздравляю\")\n    else:\n        print(\"\\nВы ошиблись\")\n        print(\"Правильный ответ -\", summ, sep=\" \")\n\nmain_5_11()","repo_name":"Rongiss/task_5","sub_path":"task_5_11/main_5_11.py","file_name":"main_5_11.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"}
+{"seq_id":"33540249075","text":"# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, val=0, left=None, right=None):\n#         self.val = val\n#         self.left = left\n#         self.right = right\nclass Solution:\n    def sumNumbers(self, root: Optional[TreeNode]) -> int:\n        nums = []\n        \n        def backtrack(root, curr):\n            if not root.left and not root.right:\n                curr.append(str(root.val))\n                nums.append(\"\".join(curr[:]))\n                return\n            \n            curr.append(str(root.val))\n            if root.left:\n                backtrack(root.left, curr[:])\n            if root.right:\n                backtrack(root.right, curr[:])\n        \n        backtrack(root, [])\n        \n        ans = 0\n        for num in nums:\n            ans += int(num)\n        \n        return ans\n        ","repo_name":"PaulosNed/A2SV","sub_path":"0129-sum-root-to-leaf-numbers/0129-sum-root-to-leaf-numbers.py","file_name":"0129-sum-root-to-leaf-numbers.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"}
+{"seq_id":"30755655052","text":"#!/usr/bin/env python\n# coding: utf-8\n\nfrom __future__ import unicode_literals\n\nimport argparse\nimport datetime\n\nfrom collections import OrderedDict\nfrom decimal import Decimal\nfrom re import compile as regexp_compile\n\nimport rows\n\nfrom lxml.etree import HTML\n\n\nREGEXP_PAGE = regexp_compile(r'^[0-9]+ de [0-9]+$')\nMONTHS = 'JAN FEV MAR ABR MAI JUN JUL AGO SET OUT NOV DEZ'\nFIELDS = OrderedDict([('category', rows.fields.TextField),\n                      ('description', rows.fields.TextField),\n                      ('value', rows.fields.DecimalField),\n                      ('date', rows.fields.DateField)])\n\n\ndef partition(data, number):\n    for index in range(0, len(data), number):\n        yield data[index:index + number]\n\n\ndef convert_text(text):\n    return text.replace('\\xa0', ' ')\n\n\ndef convert_value(value):\n    return Decimal(convert_text(value).replace('.', '').replace(',', '.')\n                   .strip().replace(' ', ''))\n\ndef convert_date(value, year):\n    day, month = convert_text(value).split()\n    day = int(day)\n    month = MONTHS.split().index(month) + 1\n    return datetime.date(year, month, day)\n\n\ndef extract_month(entry):\n    return convert_text(entry[3]).split()[1]\n\n\ndef html_to_table(input_filename, encoding='utf-8'):\n    with open(input_filename) as fobj:\n        html = fobj.read().decode(encoding).replace('\\xa0', ' ')\n    tree = HTML(html)\n\n    data = tree.xpath('//body/b')\n    for index, element in enumerate(data):\n        text = element.text\n        if text.startswith('Valores') and text.endswith('R$'):\n            break\n    new = []\n    for element in data[index + 1:]:\n        text = element.text\n        if text.startswith('FATURA DE '):\n            continue\n        elif REGEXP_PAGE.findall(text):\n            continue\n        else:\n            new.append(element.text)\n    data = new\n\n    chunks = [[value.strip() for value in row]\n              for row in partition(data, 4) if len(row) == 4]\n    table = 
rows.Table(fields=FIELDS)\n    current_year = datetime.datetime.now().year\n    months = set(extract_month(row) for row in chunks)\n    subtract_year = 'DEZ' in months and 'JAN' in months\n    for row in chunks:\n        try:\n            category = convert_text(row[0])\n            description = convert_text(row[1])\n            value = convert_value(row[2])\n        except:\n            print('WARNING: Ignoring row: {}'.format(row))\n            continue\n        year = current_year\n        month = extract_month(row)\n        if subtract_year and month in ('NOV', 'DEZ'):\n            year = current_year - 1\n        date = convert_date(row[3], year)\n        table.append({'category': category,\n                      'description': description,\n                      'value': value,\n                      'date': date, })\n\n    return table\n\n\ndef sum_iof_into_entries(table):\n    entries, iofs = [], {}\n    for row in table:\n        description = row.description.lower()\n        if description.startswith('iof de \"'):\n            entry_description = description.split('\"')[1].strip()\n            iofs[entry_description] = row\n        else:\n            entries.append(row)\n\n    table = rows.Table(fields=FIELDS)\n    for entry in entries:\n        description = entry.description.lower().strip()\n        entry = {'description': entry.description.strip(),\n                 'value': entry.value,\n                 'category': entry.category,\n                 'date': entry.date, }\n        if description in iofs:\n            iof = iofs[description]\n            entry['description'] += ' (+ IOF)'\n            entry['value'] += iof.value\n        table.append(entry)\n\n    table.order_by('date')\n    return table\n\n\ndef main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('html_entrada')\n    parser.add_argument('csv_saida')\n    args = parser.parse_args()\n\n    table = sum_iof_into_entries(html_to_table(args.html_entrada))\n    rows.export_to_csv(table, args.csv_saida)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"turicas/nubank-to-csv","sub_path":"nubank.py","file_name":"nubank.py","file_ext":"py","file_size_in_byte":3982,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"12"}
+{"seq_id":"21097573199","text":"import json\nimport sys\nfrom pathlib import Path\nfrom typing import List\n\nimport cv2\nimport numpy as np\nfrom scipy import stats\nfrom scipy.signal import convolve2d, correlate2d\nfrom tqdm import tqdm\n\nfrom measures.base_measure import BaseMeasure\n\nsys.path.append(str(Path(\".\").resolve().parent))\n\n\ndef RGBtoYxy(RGB: np.ndarray) -> np.ndarray:\n    \"\"\" convert image from RGB to Yxy\n\n    Arguments: \n        {np.ndarray} -- Input image in RGB color space as numpy array\n\n    Return: \n        {np.ndarray} -- Output image in Yxy color space as numpy array\n\n    \"\"\"\n    # Matrix for color conversion: p.22 https://engineering.purdue.edu/~bouman/ece637/notes/pdf/ColorSpaces.pdf\n    M = np.array([[0.4124, 0.3576, 0.1805],\n                  [0.2126, 0.7152, 0.0722],\n                  [0.0193, 0.1192, 0.9505]], dtype=np.float32)\n    # RGB image is reshaped to 2D array where each row corresponds to the rgb values of a single pixel\n    RGB2 = np.reshape(RGB, (RGB.shape[0] * RGB.shape[1], 3))\n    # Matrix multiplication with conversion matrix -> each row corresponds to the Yxy values of a single pixel\n    XYZ2 = M @ RGB2.T\n    S = np.sum(XYZ2, axis=0)\n    Yxy2 = np.zeros_like(XYZ2)\n    EPS = np.finfo(float).eps\n    Yxy2[0, :] = XYZ2[1, :]\n    Yxy2[1, :] = XYZ2[0, :] / (S + EPS)\n    Yxy2[2, :] = XYZ2[1, :] / (S + EPS)\n    Yxy = np.reshape(Yxy2.T, (RGB.shape[0], RGB.shape[1], 3))\n    return Yxy\n\n\ndef create_gaussian_filter(ksize: int, sigma: float) -> np.ndarray:\n    \"\"\" Gaussian filter implementation equivalent to MATLAB's\n\n    Arguments: \n        {ksize: int} -- size of filter\n        {sigma: float} -- smoothing parameter\n\n    Return: \n        {np.ndarray} -- 2d gaussian 
filter as numpy array\n    \"\"\"\n    kernel = np.zeros((2 * ksize + 1, 2 * ksize + 1))\n    center = ksize\n    for i in range(2 * ksize + 1):\n        for j in range(2 * ksize + 1):\n            x = i - center\n            y = j - center\n            kernel[i, j] = (np.exp(-0.5 * (x**2 + y**2) / sigma**2))\n    kernel = kernel / kernel.sum()\n    return kernel\n\n\nclass TMQI(BaseMeasure):\n    # definitions of the parameters are described in the paper\n    def __init__(self, confiq):\n        super(TMQI, self).__init__(confiq)\n        self.a: float = 0.8012\n        self.Alpha: float = 0.3046\n        self.Beta: float = 0.7088\n        self.level: int = 5\n        self.weight: List[float] = [0.0448, 0.2856, 0.3001, 0.2363, 0.1333]\n        self.f: int = 32\n        self.window: np.ndarray = create_gaussian_filter(5, 1.5)\n\n    def Slocal(self, L_hdr: np.ndarray, L_ldr: np.ndarray, sf: float) -> float:\n        \"\"\" calculate the local StructuralFidelity score between one ldr and hdr image\n\n        Arguments: \n            {L_hdr: np.ndarray} -- Luminance channel of HDR Image \n            {L_ldr: np.ndarray} -- Luminance channel of LDR Image \n            {sf: float} -- spatial frequency\n        Return: \n            {float} -- local StructuralFidelity score \n        \"\"\"\n        # Default parameters\n        C1: float = 0.01\n        C2: float = 10\n        window: np.ndarray = self.window # type: ignore\n\n        # calculates the local mean with gaussian filter\n        mu1 = convolve2d(L_hdr, window, mode='valid')\n        mu2 = convolve2d(L_ldr, window, mode='valid')\n\n        mu1_sq = mu1 * mu1\n        mu2_sq = mu2 * mu2\n        mu1_mu2 = mu1 * mu2\n\n        # calculates the local variance with gaussian filter\n        sigma1_sq = convolve2d(L_hdr * L_hdr, window, mode='valid') - mu1_sq\n        sigma2_sq = convolve2d(L_ldr * L_ldr, window, mode='valid') - mu2_sq\n\n        # calculates the local standard deviation\n        sigma1 = np.sqrt(np.maximum(0, sigma1_sq))\n        sigma2 = np.sqrt(np.maximum(0, sigma2_sq))\n\n        # calculate cross-correlation between the HDR and LDR image patches\n        sigma12 = convolve2d(L_hdr * L_ldr, window, mode='valid') - mu1_mu2\n\n        # Mannos CSF Function\n        CSF = 100 * 2.6 * (0.0192 + 0.114 * sf) * np.exp(-(0.114 * sf) ** 1.1)\n\n        u_hdr = 128 / (1.4 * CSF)\n        sig_hdr = u_hdr / 3\n        sigma1p = stats.norm.cdf(sigma1, u_hdr, sig_hdr)\n\n        u_ldr = u_hdr\n        sig_ldr = u_ldr / 3\n\n        sigma2p = stats.norm.cdf(sigma2, u_ldr, sig_ldr)\n\n        # Definition of the local structural fidelity measure (see paper)\n        s_map = (((2 * sigma1p * sigma2p) + C1) / ((sigma1p * sigma1p) +\n                 (sigma2p * sigma2p) + C1)) * ((sigma12 + C2) / (sigma1 * sigma2 + C2))\n        s: float = np.mean(s_map)\n\n        return s\n\n    def StructuralFidelity(self, L_hdr: np.ndarray, L_ldr: np.ndarray, level: int, weight: List[float]) -> float:\n        \"\"\" calculate multiple StructuralFidelity scores between ldr and hdr image by downscaling image n times\n\n        Arguments: \n            {L_hdr: np.ndarray} -- Luminance channel of HDR Image \n            {L_ldr: np.ndarray} -- Luminance channel of LDR Image \n            {level: int} -- specifies how many StructuralFidelity measurements are made\n            {weight: List[float]} -- weighting list for respective local StructuralFidelity score\n\n        Return: \n            {float} -- overall StructuralFidelity score \n\n        \"\"\"\n        downsample_filter = np.ones((2, 2)) / 4\n        f = self.f\n        s_local_list = []\n        s_local = []\n        for _ in range(level):\n            f = f / 2\n            s_local = self.Slocal(L_hdr, L_ldr, f)\n            s_local_list.append(s_local)\n            filtered_im1 = correlate2d(L_hdr, downsample_filter, boundary='symm', mode='same')\n            filtered_im2 = correlate2d(L_ldr, downsample_filter, boundary='symm', mode='same')\n            L_hdr = filtered_im1[::2, ::2] # downsample by a factor of 2 in each dimension\n            L_ldr = filtered_im2[::2, ::2]\n        S = 
np.prod(np.power(s_local_list, weight))\n return S\n\n def blkproc(self, img: np.ndarray, block_size: tuple) -> np.ndarray:\n \"\"\" python implementation of matlab function blkproc. Applies a function to each block of image with sliding window\n\n Arguments: \n {img: np.ndarray} -- Luminance channel of image \n {block_size: tuple} -- size of each 2d block \n\n Return: \n {np.ndarray} -- transformed luminance channel of image \n \"\"\"\n # zero padding\n def std_function(x): return np.std(x, ddof=1) * np.ones(x.shape)\n h, w = img.shape[0:2]\n add_h = block_size[0] - (h % block_size[0])\n add_w = block_size[1] - (w % block_size[1])\n pad_image = np.pad(img, [(0, add_h), (0, add_w)], mode='constant', constant_values=0)\n rows, cols = pad_image.shape\n row_block, col_block = block_size\n\n # apply function operation to each block\n for row in range(0, rows, row_block):\n for col in range(0, cols, col_block):\n x = pad_image[row:row + row_block, col:col + col_block]\n pad_image[row:row + row_block, col:col + col_block] = std_function(x)\n # crop image to original size\n pad_image = pad_image[:h, :w]\n\n return pad_image\n\n def StatisticalNaturalness(self, L_ldr: np.ndarray) -> float:\n \"\"\" calculate the StatisticalNaturalness measurement for ldr image. The parameter were taken from the paper\n\n Arguments: \n {L_ldr: np.ndarray} -- Luminance channel of image \n\n Return: \n {np.ndarray} -- StatisticalNaturalness measurement\n \"\"\"\n u = np.mean(L_ldr)\n blocks = self.blkproc(L_ldr, (11, 11))\n blocks_mean = np.mean(blocks)\n # ------------------ Contrast measurement ----------\n par_beta = np.array([4.4, 10.1])\n beta_mode = (par_beta[0] - 1) / (par_beta[0] + par_beta[1] - 2)\n # Beta probability density function\n C_0 = stats.beta.pdf(beta_mode, par_beta[0], par_beta[1])\n C = stats.beta.pdf(blocks_mean / 64.29, par_beta[0], par_beta[1])\n pc = C / C_0\n # ---------------- Brightness measurement ---------\n mu = 115.94\n sigma = 27.99\n # Gaussian probability density functions\n B = stats.norm.pdf(u, mu, sigma)\n B_0 = stats.norm.pdf(mu, mu, sigma)\n pb = B / B_0\n N = pb * pc\n\n return N\n\n def img_preprocessing(self, hdrImage, ldrImage):\n assert hdrImage is not None is not ldrImage, \"One of the images could not be loaded\"\n assert hdrImage.shape[:2] == ldrImage.shape[:2], \"Images have different sizes\"\n assert len(hdrImage.shape) == len(ldrImage.shape), \"Images are not of the same type \"\n\n if hdrImage.ndim == 3: # for rgb image\n HDR = RGBtoYxy(hdrImage)\n L_hdr = HDR[:, :, 0] # extract luminance channel\n lmin = np.min(L_hdr)\n lmax = np.max(L_hdr)\n\n L_hdr = np.round((2**32 - 1) / (lmax - lmin)) * (L_hdr - lmin)\n L_ldr = RGBtoYxy(ldrImage)\n L_ldr = (L_ldr[:, :, 0])\n\n return (L_hdr, L_ldr)\n\n if hdrImage.ndim == 2: # for gray scale images or grayflag\n L_ldr = ldrImage\n lmin = np.min(hdrImage)\n lmax = np.max(hdrImage)\n # normalize luminance: range 0 to 2^32 - 1\n L_hdr = round((2**32 - 1) / (lmax - lmin)) * (hdrImage - lmin)\n\n return (L_hdr, L_ldr)\n\n def calculate_tmqi(self, seq_name, orig_implementation=True):\n sequence_length = len(self.dataset_data[seq_name][\"hdr\"])\n hdr_images_path = self.dataset_data[seq_name][\"hdr\"]\n ldr_images_path = self.experiment_data[seq_name][\"ldr\"]\n TMQI_val = 0\n with tqdm(total=sequence_length) as pbar:\n for hdr_image_path, ldr_image_path in zip(hdr_images_path, ldr_images_path):\n image_hdr = cv2.imread(\n str(hdr_image_path), cv2.IMREAD_ANYDEPTH)\n image_hdr = image_hdr.astype('float64')\n\n image_ldr = 
cv2.imread(\n str(ldr_image_path), cv2.IMREAD_ANYDEPTH)\n image_ldr = image_ldr.astype('float64')\n\n if orig_implementation:\n res = self.img_preprocessing(image_hdr, image_ldr)\n L_hdr = res[0]\n L_ldr = res[1]\n # %----------- structural fidelity measurement -----------------\n S = self.StructuralFidelity(\n L_hdr, L_ldr, self.level, self.weight)\n # %--------- statistical naturalness measurement ---------------\n N = self.StatisticalNaturalness(L_ldr)\n # %------------- overall quality measurement -------------------\n Q = self.a * (S**self.Alpha) + (1 - self.a) * (N**self.Beta)\n print(Q)\n else:\n raise Exception(\"not original implementation was removed\")\n\n TMQI_val += Q\n pbar.update(1)\n\n return TMQI_val / len(hdr_images_path)\n\n def calculate(self):\n TMQI_val = 0\n number_of_images = 0\n for sequence_name in self.eval_sequences:\n if sequence_name not in self.dataset_data:\n raise RuntimeError(\n f\"Evaluation sequence '{sequence_name}' not found in evaluation dataset.\")\n number_of_images += len(self.dataset_data[sequence_name]['hdr'])\n curr_TMQI_val = self.calculate_tmqi(sequence_name)\n TMQI_val += curr_TMQI_val\n\n # average measure over all evaluation sequences\n TMQI_val /= len(self.eval_sequences)\n\n return TMQI_val, len(self.eval_sequences), number_of_images\n\n\ndef main():\n with open(Path(__file__).parent.parent / \"config.json\", \"r\") as cfg_file:\n config = json.load(cfg_file)\n [tmqi_val, num_sequences, num_images] = TMQI(config).calculate()\n print(tmqi_val)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"HensoldtOptronicsCV/ToneMappingIQA","sub_path":"measures/tmqi_measure/tmqi.py","file_name":"tmqi.py","file_ext":"py","file_size_in_byte":11832,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"12"} +{"seq_id":"35283613861","text":"import warnings\nwarnings.filterwarnings('ignore', message='Unclosed')\n\nfrom .api import (\n build, \n async_build,\n build_article,\n async_build_article, \n fulltext, \n hot, \n async_hot,\n languages,\n popular_urls, \n Configuration as Config\n)\n\nfrom .article import Article, ArticleException, AsyncArticle\nfrom .mthreading import NewsPool, AsyncNewsPool\nfrom .source import Source, AsyncSource\nfrom .version import __version__\n\nnews_pool = NewsPool()\nasync_news_pool = AsyncNewsPool()\n","repo_name":"GrowthEngineAI/newspaper4k","sub_path":"newz/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"12"} +{"seq_id":"35652009273","text":"from skimage import io, color\nfrom scipy import ndimage\nfrom scipy.ndimage import filters\nfrom math import pow\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\n\ndef lab_saliency(file_name, size=3, display_result=False):\n \"\"\"The function calculates lab color space based(Frequency-tuned) saliency\nof input rgb_image\n\nParameters\n----------\nrgb_image: M x N x 3 array (assumed to be color image in rgb space)\nInput rgb image whose saliency map has to be calculated\nsize: scalar value, optional\nsize of the gaussian filter kernel to smooth the rgb_image\nReturns\n-------\nOutputs: 2D array(M x N)\nThe functions returns the saliency map of Input Image\nNumber of rows and Cols. of input is same as input image\nReferences\n----------\nAchanta,S. Hemami,F. Estrada,S. 
Susstrunk\n'Frequency-tuned Salient region detection'\nIEEE International Conference on Computer Vision and Pattern Recognition (CVPR 2009),\n\"\"\"\n rgb_image = io.imread(file_name)\n #smooth the input rgb_image\n rgb_image = filters.gaussian_filter(rgb_image, size)\n #convert to lab color space\n lab_image = color.rgb2lab(rgb_image)\n mean = np.asarray([lab_image[:, :, 0].mean(), lab_image[:, :, 1].mean(), lab_image[:, :, 2].mean()])\n mean_subtracted = (lab_image - mean)**2\n saliency_map = mean_subtracted[:, :, 0] + mean_subtracted[:, :, 1] + mean_subtracted[:, :, 2]\n if(display_result):\n plt.imshow(saliency_map)\n plt.show()\n return saliency_map\n\n\ndef saliency_achanta(file_name):\n\n rgb = io.imread(file_name)\n \n #gfrgb = ndimage.gaussian_filter(rgb, 3)\n gfrgb = cv2.GaussianBlur(rgb, (3,3), 3)\n \n lab = color.rgb2lab(gfrgb)\n \n h, w, d = lab.shape\n \n l = lab[:,:,0]\n lm = np.mean(l)\n \n a = lab[:,:,1]\n am = np.mean(a)\n \n b = lab[:,:,2]\n bm = np.mean(b)\n \n sm = (l-lm)**2 + (a-am)**2 + (b-bm)**2\n\n return sm\n\ndef iisum(iimg,x1,y1,x2,y2):\n sum = 0\n if(x1>1 and y1>1) :\n sum = iimg[y2-1,x2-1]+iimg[y1-2,x1-2]-iimg[y1-2,x2-1]-iimg[y2-1,x1-2]\n elif(x1<=1 and y1>1) :\n sum = iimg[y2-1,x2-1]-iimg[y1-2,x2-1]\n elif(y1<=1 and x1>1) :\n sum = iimg[y2-1,x2-1]-iimg[y2-1,x1-2]\n else :\n sum = iimg[y2-1,x2-1]\n \n return sum\n\n\ndef msss_saliency(file_name, size=5):\n rgb = io.imread(file_name)\n \n #gfrgb = ndimage.gaussian_filter(rgb, 3, mode='mirror')\n gfrgb = filters.gaussian_filter(rgb, size)\n # gfrgb = cv2.GaussianBlur(rgb, (3,3), 5)\n #gfrgb = ndimage.gaussian_filter(rgb, 3)\n \n lab = color.rgb2lab(gfrgb)\n \n height, width, dim = lab.shape\n \n l = lab[:,:,0]\n # lm = np.mean(l)\n \n a = lab[:,:,1]\n # am = np.mean(a)\n \n b = lab[:,:,2]\n # bm = np.mean(b)\n \n # create integral images\n li = np.cumsum(np.cumsum(l, axis=1), axis=0)\n ai = np.cumsum(np.cumsum(a, axis=1), axis=0)\n bi = np.cumsum(np.cumsum(b, axis=1), axis=0)\n\n sm = np.zeros(shape=(height, width))\n for j in range(1, height+1):\n yo = min(j, height-j)\n y1 = max(1,j-yo)\n y2 = min(j+yo,height)\n for k in range(1, width+1):\n xo = min(k,width-k)\n x1 = max(1,k-xo)\n x2 = min(k+xo,width)\n invarea = 1.0/((y2-y1+1)*(x2-x1+1))\n lm = iisum(li,x1,y1,x2,y2)*invarea\n am = iisum(ai,x1,y1,x2,y2)*invarea\n bm = iisum(bi,x1,y1,x2,y2)*invarea\n #---------------------------------------------------------\n # Compute the saliency map\n #---------------------------------------------------------\n sm[j-1,k-1] = (l[j-1,k-1]-lm)**2 + (a[j-1,k-1]-am)**2 + (b[j-1,k-1]-bm)**2\n\n img = (sm-np.min(sm))/(np.max(sm)-np.min(sm))\n\n return img\n\n\ndef minmaxnormalization(vector):\n \"\"\"\nMakes the min max normalization over a numpy vector\n$$ v_i = (v_i - min(v)) / max(v)\n\"\"\"\n vector = vector - (vector.min())\n vector = vector / (vector.max() - vector.min())\n return vector\n\n \n\ndef frequency_tuned_saliency(img_src):\n \"\"\"\nFrequency Tuned Saliency.\nFind the Euclidean distance between\nthe Lab pixel vector in a Gaussian filtered image\nwith the average Lab vector for the input image.\nR. Achanta, S. Hemami, F. Estrada and S. 
Susstrunk,\nFrequency-tuned Salient Region\nDetection, IEEE International Conference on Computer\nVision and Pattern Recognition\n\nArgs:\nimage (np.array): an image.\n\nReturns:\na 2d image saliency map.\n\"\"\"\n image = cv2.imread(img_src)\n\n image = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)\n #mean of each channel\n means = []\n for c in range(image.shape[2]):\n means.append(image[:, :, c].mean())\n means = np.asarray(means)\n\n image = cv2.medianBlur(image, 9)\n dist = (image - means) ** 2\n print(\"mean color is %s\" % means)\n salmap = np.zeros((dist.shape[0], dist.shape[1]))\n for i in range(dist.shape[0]):\n for j in range(dist.shape[1]):\n salmap[i][j] = np.sqrt(dist[i][j].sum())\n\n return minmaxnormalization(salmap) \n","repo_name":"vyzuer/photography_assistance","sub_path":"src/saliency_achanta.py","file_name":"saliency_achanta.py","file_ext":"py","file_size_in_byte":4981,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"32372256364","text":"import sys\n\nsys.path.insert(0, \"./users\")\nfrom FreeUser import FreeUsers\nfrom PremiumUser import PremiumUsers\n\n\n# create new users\nuser1 = FreeUsers(\"Steve\", \"123 Main St.\", \"steve@email.com\", 444)\nuser2 = FreeUsers(\"Joe\", \"456 Main St.\", \"joe@email.com\", 111)\nuser3 = PremiumUsers(\"Tom\", \"789 Main St.\", \"joe@email.com\", 999)\n\n# print basic user info\nprint(user1)\nprint(user2)\nprint(user3)\n\nprint(\"-----------------------------------\")\n\n# user 1 messages\nuser1.add_post(\"test1\")\nuser1.add_post(\"test2\")\nuser1.add_post(\"test3\") # should not work\n\n# user 2 messages\nuser2.add_post(\"user 2 was here\")\nuser2.add_post(\"my second message\")\n\n# user 3 messages\nuser3.add_post(\"I'm a premium user.\")\nuser3.add_post(\"I can write as many messages as I want.\")\nuser3.add_post(\"Here's my third pos.t\")\n\n# show user 1 posts\nprint(user1.show_posts())\n# show user 2 posts\nprint(user2.show_posts())\n# show user 3 posts\nprint(user3.show_posts())\n\n# add discount code to user 3\nuser3.set_code(12)\n# show user 3 discount code\nprint(user3.get_code())\n","repo_name":"stephanlamoureux/codeplatoon-devops","sub_path":"python/app-users-III/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"10146166338","text":"import pickle\nimport cv2\nimport os\nfrom scipy.spatial import distance\nimport copy\nfrom matplotlib import pyplot as plt\n\n\n\n\n\nfolder_path = '/home/simenvg/environments/my_env/datasyn/project/JPEGImages'\nfolder_path_resized = '/home/simenvg/environments/my_env/datasyn/project/resized_images'\n\n\nBLUE = (255,0,0)\nRED = (0,0,255)\nGREEN = (0,255,0)\nYELLOW = (255,255,0)\n\n\nimages_YOLO = pickle.load(open(os.path.join(folder_path,'YOLO.txt'), \"rb\"))\nimages_GT = pickle.load(open(os.path.join(folder_path,'ground_truth.txt'), \"rb\"))\n\nimages_YOLO_resized = pickle.load(open(os.path.join(folder_path_resized,'YOLO.txt'), \"rb\"))\nimages_GT_resized = pickle.load(open(os.path.join(folder_path_resized,'ground_truth_resized.txt'), \"rb\"))\n\nfilenames = []\nfilenames_resized = []\n\nfor keys, values in images_GT.items():\n\tfilenames.append(keys)\n\nfor keys, values in images_GT_resized.items():\n\tfilenames_resized.append(keys)\n\n\nimages_YOLO_conf_over_25 = {}\n\nfor key, value in images_YOLO.items():\n\tboxes = []\n\tscores = []\n\tfor i in range(len(value[0])):\n\t\tif value[1][i] >= 
0.25:\n\t\t\tboxes.append(value[0][i])\n\t\t\tscores.append(value[1][i])\n\timages_YOLO_conf_over_25[key] = (boxes, scores)\n\n\n\n\ndef intersect_area(box_1, box_2): # returns None if rectangles don't intersect\n\tdx = min(max(box_1[1][0], box_1[0][0]), box_2[1][0]) - max(min(box_1[1][0], box_1[0][0]), box_2[0][0])\n\tdy = min(max(box_1[1][1], box_1[0][1]), box_2[1][1]) - max(min(box_1[1][1], box_1[0][1]), box_2[0][1])\n\t#print('dx: ', dx, ' dy: ', dy)\n\tif (dx>=0) and (dy>=0):\n\t\treturn dx*dy\n\telse:\n\t\treturn -1\n\n\ndef intersect_over_union(box_1, box_2):\n\t#print('box_1: ', box_1, ' box_2: ', box_2)\n\tarea_box_1 = abs((box_1[1][0] - box_1[0][0]) * (box_1[1][1] - box_1[0][1]))\n\tarea_box_2 = abs((box_2[1][0] - box_2[0][0]) * (box_2[1][1] - box_2[0][1]))\n\tintersection = intersect_area(box_1, box_2)\n\t#print('area_box_1: ', area_box_1, ' area_box_2: ', area_box_2, ' intersection: ', intersection)\n\tif intersection == -1:\n\t\treturn -1\n\telse:\n\t\treturn intersection / (area_box_1 + area_box_2 - intersection)\n\n\ndef validated_detected_objects(detected_boxes, GT_boxes):\n\tapproved_boxes = []\n\ttemp_detected_boxes = copy.copy(detected_boxes)\n\tfor GT_box in GT_boxes:\n\t\tif len(temp_detected_boxes) > 0:\n\t\t\tfound_box = False\n\t\t\tbest_iou = intersect_over_union(GT_box, temp_detected_boxes[0])\n\t\t\tif best_iou >= 0.5:\n\t\t\t\tfound_box = True\n\t\t\tbest_box = temp_detected_boxes[0]\n\t\t\tfor i in range(1, len(temp_detected_boxes)):\n\t\t\t\tiou = intersect_over_union(GT_box, temp_detected_boxes[i])\n\t\t\t\tif iou >= 0.5 and iou > best_iou:\n\t\t\t\t\tfound_box = True\n\t\t\t\t\tbest_box = temp_detected_boxes[i]\n\t\t\t\t\tbest_iou = iou\n\t\t\tif found_box:\n\t\t\t\tapproved_boxes.append(best_box)\n\t\t\t\ttemp_detected_boxes.remove(best_box)\n\treturn approved_boxes\n\ndef get_box_center(box):\n\tx = abs((box[0][0] + box[1][0])/2)\n\ty = abs((box[0][1] + box[1][1])/2)\n\treturn (x,y)\n\n\ndef euc_dist(point_1, point_2):\n\treturn distance.euclidean(point_1, point_2)\n\n\ndef get_precision(filenames, GT_boxes, detected_boxes):\n\tsum_detected_objects = 0\n\tsum_true_positives = 0\n\tfor image in filenames:\n\t\tsum_detected_objects += len(detected_boxes[image])\n\t\tsum_true_positives += len(validated_detected_objects(detected_boxes[image], GT_boxes[image]))\n\tif sum_detected_objects == 0:\n\t\treturn -1\n\treturn sum_true_positives / sum_detected_objects\n\ndef get_recall(filenames, GT_boxes, detected_boxes):\n\tsum_GT_boxes = 0\n\tsum_true_positives = 0\n\tfor image in filenames:\n\t\tsum_GT_boxes += len(GT_boxes[image])\n\t\tsum_true_positives += len(validated_detected_objects(detected_boxes[image], GT_boxes[image]))\n\treturn sum_true_positives / sum_GT_boxes\n\n\ndef boxes_based_on_score(filenames, detected_boxes, conf_level):\n\timages = {}\n\tfor image in filenames:\n\t\tboxes = []\n\t\tfor i in range(len(detected_boxes[image][0])):\n\t\t\tif float(detected_boxes[image][1][i]) > conf_level:\n\t\t\t\tboxes.append(detected_boxes[image][0][i])\n\t\timages[image] = boxes\n\treturn images\n\n\ndef plot_precision_recall(yolo_prec_recall, yolo_prec_recall_resized):\n\tfig_1, ax_1 = plt.subplots()\n\tfor i in range(len(yolo_prec_recall[0])):\n\t\tif yolo_prec_recall[0][i] != -1:\n\t\t\tax_1.plot(yolo_prec_recall[0][i], yolo_prec_recall[1][i], 'o', color='red', label='YOLO')#, AP: ' + str(round(get_average_precision(yolo_prec_recall),2)))\n\tfor i in range(len(yolo_prec_recall[0])):\n\t\tif yolo_prec_recall_resized[0][i] != 
-1:\n\t\t\tax_1.plot(yolo_prec_recall_resized[0][i], yolo_prec_recall_resized[1][i], 'o', color='blue', label='YOLO resized images')\n\tax_1.set_title('Precision/Recall')\n\tplt.grid()\n\tplt.legend()\n\tplt.xlabel('Recall')\n\tplt.ylabel('Precision')\n\tplt.show()\n\n\n\nthresholds = [0.999, 0.99]\n\na = 0.95\nwhile a >= 0.01:\n\tthresholds.append(a)\n\ta -= 0.01\n\nthresholds.extend([0.001, 0.000001, 0.0000000001])\n\n\ndef generate_recall_and_precisions(filenames, GT_boxes, detected_boxes, thresholds):\n\trecalls = []\n\tprecisions = []\n\tfor elem in thresholds:\n\t\trecalls.append(get_recall(filenames, GT_boxes, boxes_based_on_score(filenames, detected_boxes, elem)))\n\t\tprecisions.append(get_precision(filenames, GT_boxes, boxes_based_on_score(filenames, detected_boxes, elem)))\n\n\treturn (precisions, recalls)\n\n\ndef get_average_precision(prec_recall):\n\tprecisions = prec_recall[0]\n\tsum_precisions = 0\n\tfor elem in precisions:\n\t\tsum_precisions += elem\n\treturn sum_precisions/len(precisions)\n\n\n\nyolo_prec_recall = generate_recall_and_precisions(filenames, images_GT, images_YOLO, thresholds)\nyolo_prec_recall_resized = generate_recall_and_precisions(filenames_resized, images_GT_resized, images_YOLO_resized, thresholds)\n\n\nprint('YOLO AP: ', get_average_precision(yolo_prec_recall))\n\nplot_precision_recall(yolo_prec_recall, yolo_prec_recall_resized)\n\nsum_GT_boxes = 0\nsum_validated_boxes = 0\nfor image in filenames:\n\tvalidated_boxes = validated_detected_objects(images_YOLO_conf_over_25[image][0], images_GT[image])\n\tsum_validated_boxes += len(validated_boxes)\n\tsum_GT_boxes += len(images_GT[image])\n\n\nprint('Detected ', sum_validated_boxes, ' out of ', sum_GT_boxes, ' boats, ', sum_validated_boxes * 100/ sum_GT_boxes, ' %')\n\n\n\n\n\ndef draw_boxes(image_name, GT_boxes, detected_boxes):\n\tvalidated_boxes = validated_detected_objects(detected_boxes, GT_boxes)\n\timg = cv2.imread(os.path.join(folder_path,image_name))\n\tfor box in detected_boxes:\n\t\tcv2.rectangle(img, box[0], box[1], RED, 1)\n\tfor box in validated_boxes:\n\t\tcv2.rectangle(img, box[0], box[1], BLUE, 2)\n\tfor box in GT_boxes:\n\t\tcv2.rectangle(img, box[0], box[1], GREEN, 1)\n\tcv2.imshow(image_name, img)\n\tcv2.waitKey(0)\n\n\n# for image in filenames:\n# \tdraw_boxes(image, images_GT[image], images_YOLO_conf_over_25[image][0])\n\n\n# image1 = 'resized_93_flip.jpg'\n# image2 = 'resized_48.jpg'\n# image3 = 'resized_80.jpg'\n# image4 = 'resized_106_flip.jpg'\n# draw_boxes(image1, images_GT[image1], images_YOLO_conf_over_25[image1][0])\n# draw_boxes(image2, images_GT[image2], images_YOLO_conf_over_25[image2][0])\n# draw_boxes(image3, images_GT[image3], images_YOLO_conf_over_25[image3][0])\n# draw_boxes(image4, images_GT[image4], images_YOLO_conf_over_25[image4][0])","repo_name":"simenvg/TDT4265_project","sub_path":"test_detections.py","file_name":"test_detections.py","file_ext":"py","file_size_in_byte":7027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"}
+{"seq_id":"73846500180","text":"from asammdf import MDF\nimport argparse\nfrom os import path\nimport re\n\ndef log(*messages):\n    if args.verbose:\n        print(*messages)\n\nparser = argparse.ArgumentParser(prog = 'data-overlay preprocessing')\nparser.add_argument('filename')\nparser.add_argument('-v', '--verbose', action='store_true')\nparser.add_argument('-c', '--channel', action='append', nargs='+')\nparser.add_argument('-s', '--start-time', type=float, default=0)\nparser.add_argument('-e', '--end-time', 
type=float, default=-1)\nparser.add_argument('-r', '--sample-rate', type=int, default=24)\nparser.add_argument('-o', '--output', type=str, default='output')\n\nargs = parser.parse_args()\n\n\nlog(\"Verbose mode is ON\")\nlog(\"Filename: \", args.filename)\nlog(\"Channel: \", args.channel)\nlog(\"Start time: \", args.start_time)\nlog(\"End time: \", args.end_time)\nlog(\"Sample rate: \", args.sample_rate)\nlog(\"Output: \", args.output)\n\n# reduce array of arrays with strings to a single array with strings\nchannel_names = []\nfor channel in args.channel:\n for name in channel:\n channel_names.append(name)\n\nlog(\"Channel names: \", channel_names)\n\n\ndef get_channel_list(mdf):\n mdf_info = mdf.info()\n channel_names = list()\n for x, y in mdf_info.items():\n if isinstance(y, dict):\n xxx = y.get(\"channel 1\")\n start_index = xxx.find(\"\\\"\")\n end_index = xxx.find(\"\\\"\", start_index + 1)\n quote = xxx[start_index + 1: end_index]\n channel_names.append(quote)\n return channel_names\n\ndef get_potential_signals(channel_name, mdf_chn_lst):\n relevant_signals = []\n p = re.compile('^[A-Za-z0-9_]{0,}' + channel_name + '[A-Za-z0-9_]{0,}$')\n temp_list = [s for s in mdf_chn_lst if p.match(s)]\n relevant_signals = relevant_signals + temp_list\n return relevant_signals\n\n# check on filesystem if file exists\nif not path.exists(args.filename):\n print(\"ERROR: MDF input file '\", args.filename, \"' does not exist!\")\n exit(1)\n\nmdf = MDF(args.filename)\n\nstart_time = args.start_time\nend_time = args.end_time\nsample_rate = args.sample_rate\n\nall_channel_list = get_channel_list(mdf)\n\nfor channel_name in channel_names:\n if channel_name in all_channel_list:\n if args.verbose:\n log(channel_name, \"is in the list\")\n else:\n alternative_signals = get_potential_signals(channel_name, all_channel_list)\n print(\"ERROR: '\", channel_name, \"' is NOT present in the MDF file\")\n if args.verbose:\n log(\"Instead these are found:\")\n for alternative_signal in alternative_signals:\n log(alternative_signal)\n exit(1)\n\ndelta_t_list = []\nfor channel_name in channel_names:\n timestamps = mdf.get(channel_name).timestamps\n delta_t = (timestamps[-1] - timestamps[0]) / timestamps.size\n delta_t_list.append(round(delta_t, 4))\nmin_delta_t = min(delta_t_list)\nlog(\"Minimum delta t: \", min_delta_t)\n\n\nlog(\"Filtering channels\")\nmdf = mdf.filter(channel_names)\n\nlog(\"Resampling to minimum delta t\")\nmdf = mdf.resample(raster=min_delta_t)\n\nlog(\"Trimming start and end time\")\nif end_time == -1:\n #log(\"End time not specified, using max time of \", end_time)\n mdf = mdf.cut(start=start_time)\nelse:\n mdf = mdf.cut(start=start_time, stop=end_time)\nlog(\"Resampling to \", sample_rate, \"Hz\")\nmdf = mdf.resample(raster=1/sample_rate)\n\nlog(\"Exporting to \", args.output)\nmf4_filename = args.output + \".mf4\"\ncsv_filename = args.output + \".csv\"\nmdf.export(fmt='csv', filename=csv_filename, single_time_base=True, overwrite=True)\nlog(\"Done exporting csv\")\nmdf.save(mf4_filename, overwrite=True)\nlog(\"Done exporting mf4\")\n","repo_name":"greenteam-stuttgart/data-overlay","sub_path":"data-preprocessing/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"2416857527","text":"from TestingEssentials import EvaluateCmax\nimport copy \n\n# BruteForce: \n# przyjmuje: _J zbiór wszystkich zadań\n# zwraca: BestCmax - najlepszy możliwy czas Cmax wykonywania dla danego 
zbioru _J\n# Uwaga: algorytm drzewiasty o bardzo dużej złożoności obliczeniowej (O(n!)), \n# nie dawać dużego n i M\n\ndef BruteForce(_J): \n BestCmax = float('inf')\n for j in _J:\n BranchCmax, BranchPi = ProcedureBruteForce(_J,j,[])\n if BestCmax > BranchCmax:\n BestCmax = BranchCmax\n BestPi = copy.deepcopy(BranchPi)\n return BestCmax, BestPi\n\n\n# Procedura BruteForce: \n# przyjmuje: _J - pozostałe zadania, \n# _j - przekładane zadanie, \n# _pi - zadania (nie numeracje) ułożone w kolejności wykonywania.\n# zwraca: Cmax - najlepszy możliwy czas wykonywania Cmax dla danego rozgałęzienia lub liścia.\n# Uwaga: algorytm drzewiasty o bardzo dużej złożoności obliczeniowej (O(n!)), \n# nie dawać dużego n i M\n\ndef ProcedureBruteForce(_J,_j,_pi): \n N = copy.deepcopy(_J)\n pi = copy.deepcopy(_pi)\n pi.append(_j)\n N.remove(_j)\n BestCmax = float('inf')\n if N:\n for j in N:\n BranchCmax, BranchPi = ProcedureBruteForce(N,j,pi)\n if BestCmax > BranchCmax:\n BestCmax = BranchCmax\n BestPi = copy.deepcopy(BranchPi)\n else:\n Cmax = EvaluateCmax(pi)\n return Cmax, pi\n return BestCmax, BestPi","repo_name":"szolkiewicz/spd2023","sub_path":"lab3/BruteForce.py","file_name":"BruteForce.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"38788605768","text":"#encoding=utf-8\nimport sys\n#file=open(r'C:\\Users\\danny\\test.txt','w')\n#file.write('ssss')\n#file.close()\n#del file\n\na={}\na[(2,3,4)]='王超'\na.update(name= 'wc',age= 22)#合并两个字典\nvalue={'name':'wc','age': 22}\nprint(value)\nv=dict(name='wc',age=22)\nprint(v)\nprint(a)\nb=a.pop('name')\nprint(a)\nsys.stdout.write(b+'\\n')\ni=10\nwhile i:\n i -= 1\n sys.stdout.write(str(i))\nelse:\n sys.stdout.write(str(i))","repo_name":"wang-just/test","sub_path":"CreateFile.py","file_name":"CreateFile.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"12578316478","text":" \n\"\"\"\nFile: cluster.py\nAuthor: zhanghao55(zhanghao55@baidu.com)\nDate: 2019/12/10 15:26:16\n\"\"\"\n\nimport logging\nimport os\nimport sys\n_cur_dir = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(\"%s/../\" % _cur_dir)\n\nfrom model.cluster_model_impl import BaseCluster\nfrom utils.logger import init_log\nfrom feature.feature_generator import FeatureGenerator\nfrom preprocess import ProcessFilePath\ninit_log()\n\nimport config\n\n\nclass ClusterDemo(BaseCluster):\n def __init__(self, mid_data_dir, model_dir, output_dir):\n super(ClusterDemo, self).__init__(model_dir, output_dir)\n self.mid_data_paths = ProcessFilePath(output_dir=mid_data_dir)\n\n self.feature_generator = FeatureGenerator(\n seg_method=config.seg_method,\n segdict_path=config.segdict_path,\n stopword_path=config.stopword_path,\n ngram=config.ngram,\n feature_min_length=config.feature_min_length)\n\n FeatureGenerator.save(self.feature_generator, self.generator_path, True)\n self.line_process_num = 0\n logging.info(\"ClusterDemo init succeed\")\n\n def cluster_feature_label_gen(self, line):\n \"\"\"根据字符串 提取其类别、特征 组成二元组\n [in] line: str, 数据集每一行的内容\n [out] res: (int, str), 二元组由类别和特征组成 特征由空格连接为字符串\n \"\"\"\n parts = line.strip(\"\\n\").split(\"\\t\")\n text = parts[7] + parts[8]\n self.line_process_num += 1\n feature_list = self.feature_generator.gen_feature(text, duplicate=config.duplicate)\n if self.line_process_num % 4000 == 0:\n seg_text = \"/ \".join(self.feature_generator.seg_words(text))\n 
logging.debug(\"process line num #%d\" % self.line_process_num)\n logging.debug(\"origin : %s\" % text.encode(\"gb18030\"))\n logging.debug(\"=\"*150)\n logging.debug(\"seg res : %s\" % seg_text.encode(\"gb18030\"))\n features = feature_list if config.duplicate else set(feature_list)\n return (0, \" \".join(features))\n\n def preprocess(self, data_dir):\n \"\"\"根据指定目录 获得数据特征\n [out] train_data_vec: matrix, 数据集特征\n \"\"\"\n self.feature_label_gen = self.cluster_feature_label_gen\n self.line_process_num = 0\n\n super(ClusterDemo, self).preprocess(\n data_dir,\n re_seg=config.re_seg,\n to_file=config.to_file,\n mid_data_paths=self.mid_data_paths,\n split_train_test=config.split_train_test,\n test_ratio=config.test_ratio,\n vec_method=config.vec_method,\n feature_select=config.feature_select,\n is_percent=config.is_percent,\n feature_keep_percent=config.feature_keep_percent,\n feature_keep_num=config.feature_keep_num,\n min_df=config.min_df,\n )\n\n def cluster(self,\n data_path,\n n_clusters=100,\n params={'n_clusters': [5, 10, 20, 50, 75, 100]},\n grid_search=True):\n \"\"\"根据数据特征进行聚类\n \"\"\"\n super(ClusterDemo, self).cluster(data_path, n_clusters, params, grid_search)\n\n\ndef main():\n cluster = ClusterDemo(\n mid_data_dir=config.mid_data_dir,\n model_dir=config.model_dir,\n output_dir=config.output_dir)\n cluster.cluster(data_path=config.train_data_dir, grid_search=False)\n\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"HawChang/text_utils","sub_path":"demo/cluster_demo.py","file_name":"cluster_demo.py","file_ext":"py","file_size_in_byte":3556,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"12"} +{"seq_id":"1977784878","text":"from django.urls import path\nfrom . import views\nfrom django.contrib import admin\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', views.index, name='index'),\n path('home/', views.home, name='home'),\n path('course/', views.course, name='course'),\n path('register/', views.register, name='register'),\n path('about/',views.runoob),\n path('recommendation/', views.recommendation, name='recommendation'),\n]","repo_name":"GreatShadow/SCRS","sub_path":"students/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"9332358036","text":"#!/usr/bin/python\nimport libvoikko\nimport re\n# import pynvim\n\n\nclass Oiko():\n\n def __init__(self):\n self.voikko = libvoikko.Voikko(\"fi-FI\")\n self.__teksti = ''\n self.poista = re.compile('\\.|,|:|;|\\(|\\)|')\n\n @property\n def teksti(self):\n return self.__teksti\n\n @teksti.setter\n def teksti(self, p):\n self.__teksti = p\n \n def korjaa_rivi(self, rivi):\n typot = set([sana for sana in rivi.split() \n if not self.voikko.spell(sana)])\n return typot\n\n def tekstin_typot(self):\n typot = set()\n for p in self.teksti:\n p_siivottu = self.poista.sub('',p)\n typot.update(self.korjaa_rivi(p_siivottu)) \n return \"\\|\".join(map(str,typot)) \n\nif __name__ == \"__main__\":\n o = Oiko()\n o.teksti = ['kisssa kissa kissalla koira koera', 'koera vesi voda vasi vaasi']\n for r in o.teksti:\n print(r)\n tulos = o.tekstin_typot()\n print(tulos)\n","repo_name":"heikkiPnen/dotfiles","sub_path":"dot_config/nvim/pack/start/oikoluku/rplugin/python3/executable_oiko.py","file_name":"executable_oiko.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"fi","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} 
+{"seq_id":"8949673255","text":"import os\nfrom dotenv import load_dotenv\nfrom twitchio.ext import commands\nimport twitchio\n\n\ndotenv_path = os.path.join(os.path.dirname('__file__'), '.env')\nload_dotenv(dotenv_path)\nTOKEN = os.getenv('TOKEN')\nNICK = os.getenv('NICK')\nCHANNEL = os.getenv('CHANNEL')\n\n\nclass TwitchBot(commands.Bot):\n def __init__(self):\n super().__init__(irc_token=TOKEN, nick=NICK, prefix='!',\n initial_channels=[CHANNEL])\n\n async def event_ready(self):\n print(f'Ready | {self.nick}')\n\n async def event_message(self, message):\n print(message.content)\n await self.handle_commands(message)\n\n @commands.command(name='test')\n async def my_command(self, ctx: twitchio.Context):\n await ctx.send(f\"Hello {ctx.author.name}\")\n\n @commands.command(name='hello')\n async def hello_command(self, ctx: twitchio.Context):\n await ctx.send(f\"Hello {ctx.author.name}!\")\n\n\ntwitch = TwitchBot()\ntwitch.run()\n","repo_name":"Accoustium/twitchbot","sub_path":"twitchbot.py","file_name":"twitchbot.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"37547383274","text":"#!python3\n\nimport os\nimport sys\n\nfrom common import retrieve_commits, parse_subject\n\n\nSTART_COMMIT = 'CI_COMMIT_BEFORE_SHA'\nCUR_COMMIT = 'CI_COMMIT_SHA'\nBRANCH_NAME = 'CI_COMMIT_REF_NAME'\nPROJECT_DIR = 'CI_PROJECT_DIR'\n\n\ndef validate_commit_msgs():\n env = os.environ\n commits = retrieve_commits(\n env[PROJECT_DIR],\n env[START_COMMIT] if env[BRANCH_NAME] == 'master' else 'origin/master',\n env[CUR_COMMIT])\n for subject in map(parse_subject, commits):\n if not subject.is_valid:\n print()\n print(\"-\"*60)\n print(f\"UNGUELTIGE COMMIT MESSAGE: {subject.text}\")\n print(\"-\"*60)\n print()\n msg = subject.text.upper()\n if env[BRANCH_NAME] == 'master' or \\\n not (msg.startswith(\"DRAFT:\") or msg.startswith(\"WIP:\")):\n sys.exit(-1)\n\n\ndef main():\n validate_commit_msgs()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"baltech-ag/ci-scripts","sub_path":"commit_msg_validate.py","file_name":"commit_msg_validate.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"15185644534","text":"# Foundations of Data Science: K-Means Clustering in Python\n# by University of London & Goldsmiths, Coursera\n# Week 4: Introducing Pandas and Using K-Means to Analyse Data\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndf = pd.read_csv('files/happyscore_income.csv')\ndf = df.rename(columns={'GDP': 'x', 'happyScore': 'y'})\ndf = df[['country', 'x', 'y']]\ndf = df.set_index('country')\n\n# Domain Standardization of Data\ndf_min = np.min(df, 0)\ndf_max = np.max(df, 0)\ndf = (df - df_min) / (df_max - df_min)\n\n# Define the datapoints of interest and drop them from the global dataframe\ndf_poi = df.loc[[\n\t'Mozambique',\n\t'Gabon',\n\t'Costa Rica'\n\t]]\ndf = df.drop(df_poi.index)\n\n# Start of Plotting\nplt.title('Foundations of Data Science: K-Means Clustering in Python\\nWeek 4, Peer-graded Assignment')\nplt.xlabel('GDP per Capita (normalised)')\nplt.ylabel('Happiness Score (normalised)')\nplt.axis([-0.1, 1.1, -0.1, 1.1])\n\n# Plot and Label Datapoints of Interest\nplt.scatter(df_poi['x'], df_poi['y'], label='Point of Interest')\t#Plot the datapoints of interest\nplt.plot(df_poi['x'], df_poi['y'], ls=':', alpha=0.5)\t\t\t\t#Draw lines between the datapoints\nfor country 
in df_poi.index:\n\tplt.text(\t\t\t\t\t\t\t\t\t\t\t\t\t\t#Label the datapoints on the graph\n\t\tdf_poi.loc[country, 'x'] - 0.01,\n\t\tdf_poi.loc[country, 'y'] + 0.01,\n\t\tf\"{country}\"\n\t)\n\n# Plot all other Datapoints\nplt.scatter(df['x'], df['y'], alpha=0.75, label='Other Countries')\n\nplt.legend()\nplt.show()","repo_name":"anirban314/coursework","sub_path":"K-Means Clustering in Python/points-of-interest.py","file_name":"points-of-interest.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"}
+{"seq_id":"32442731679","text":"import json\nimport re\nimport numpy as np\nfrom itertools import cycle\n\ndict_values = {}\nstart_pos = 5\nend_pos = 12\n\n\ndef data(data):\n    for d in data:\n        if len(d['data']) >= 3:\n            cnt = 1\n            for i in range(len(d['data'])):\n                if re.match(r'\\s{1,7}\\d\\.', string=d['data'][i]):\n                    name = d['data'][i]\n                    name = str(cnt) + ' |' + d['data'][i]\n                    if name in dict_values:\n                        name = str(cnt) + name\n                        dict_values[name] = []\n                    else:\n                        dict_values[name] = []\n                    cnt += 1\n\n    key = list(dict_values.keys())\n    j = 0\n    k = 0\n    arr_data = []\n    for d in data:\n        if k < 30:\n            if len(d['data'][start_pos:end_pos]):\n                for i in range(len(d['data'][start_pos:end_pos])):\n                    arr_data.append((key[j], d['data'][start_pos:end_pos][i]))\n                    dict_values[key[j]].append(d['data'][start_pos:end_pos][i])\n                j += 1\n            k += 1\n        else:\n            break\n\n\ndef save_data(data):\n    names = []\n    arr = []\n    values = []\n    for d in data:\n        if len(d['data']) >= 3:\n            cnt = 1\n            for i in range(len(d['data'])):\n                if (re.match(r'\\s{1,7}\\d\\.', string=d['data'][i])):\n                    name = str(cnt) + ' |' + d['data'][i]\n                    if name in arr:\n                        name = str(cnt) + name\n                        arr.append(name)\n                        names.append(name)\n                    else:\n                        arr.append(name)\n                        names.append(name)\n                    cnt += 1\n            if len(d['data'][start_pos:end_pos]):\n                for i in range(len(d['data'][start_pos:end_pos])):\n                    arr.append(d['data'][start_pos:end_pos][i])\n                    values.append(d['data'][start_pos:end_pos][i])\n    return arr, names, values\n\n\ndef distance(arr, names):\n    sum = 0\n    sort_names = []\n    distances_names = []\n    distance_values = []\n    for name in names:\n        if re.findall('[1]\\.', string=name):\n            sort_names.append(name)\n    sort_names.append(names[-1])\n    for j in range(len(sort_names) - 1):\n        a = sort_names[j]\n        b = sort_names[j + 1]\n        distance_name = abs(names.index(a) - names.index(b))\n        distances_names.append(distance_name)\n    distances_names[-1] += 1\n\n    for i in range(len(sort_names) - 2):\n        a = sort_names[i]\n        b = sort_names[i + 1]\n\n        distance_value = abs(arr.index(a) - arr.index(b))\n        sum = sum + distance_value\n        distance_values.append(sum)\n    distance_values.append(len(arr))\n    return distance_values, distances_names\n\n\ndef fill_gaps(distance_values, distances_names, arr):\n    start = 0\n    arr_index = []\n    for gap in range(len(distance_values)):\n        for i in arr[start:distance_values[gap]]:\n            if distances_names[gap] == 1:\n                arr_index.append(i)\n            elif distances_names[gap] == 2:\n                if arr.index(i) % distances_names[gap] == 0:\n                    arr_index.append(arr.index(i))\n                elif arr.index(i) % distances_names[gap] != 0:\n                    arr_index.append(arr.index(i))\n            elif distances_names[gap] == 3:\n                arr_index.append(arr.index(i) + 3)\n        start = 0\n        start += distance_values[gap]\n\n\ndef make_list(distance_values, distances_names, arr):\n    listochek = []\n    start_names = 0\n    start_distance = 5\n    for i in range(len(distance_values)):\n        if i == 3:\n            start_names += 1\n        elif i == 6:\n            start_names += 1\n        stop_names = distances_names[i] + start_names\n        names = 
(arr[start_names:stop_names])\n stop_names = stop_names - distances_names[i]\n values = (arr[start_distance:distance_values[i]])\n listochek.append((list(zip(cycle(names), values))))\n start_distance += len(values) + distances_names[i]\n start_names += distances_names[i] + len(values)\n return listochek\n\n\ndef make_dictionary(listochek):\n dictionary = {}\n for lst in listochek:\n for item in lst:\n if item[0] not in dictionary:\n dictionary[item[0]] = []\n dictionary[item[0]].append(item[1])\n return dictionary\n","repo_name":"wadimAI3337/projectMIEM2023","sub_path":"projectMIEM/to_json.py","file_name":"to_json.py","file_ext":"py","file_size_in_byte":4355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"24397180998","text":"#! /usr/bin/env python3\n\n\ndef read_data(filename):\n with open(filename, 'rt') as file:\n return [line.strip() for line in file.readlines()]\n\n\nclass Grid:\n def __init__(self, raw_data):\n self._grid = []\n for line in raw_data:\n self._grid.append([int(x) for x in line])\n self._nrows = len(self._grid)\n self._ncols = len(self._grid[0])\n\n def _flash_cell(self, r0, c0):\n for r in range(r0 - 1, r0 + 2):\n if r < 0 or r >= self._nrows:\n continue\n for c in range(c0 - 1, c0 + 2):\n if c < 0 or c >= self._ncols:\n continue\n if r == r0 and c == c0:\n continue\n self._increment_cell_level(r, c)\n\n def _increment_cell_level(self, r, c):\n if self._grid[r][c] <= 9:\n self._grid[r][c] += 1\n if self._grid[r][c] > 9:\n self._flash_cell(r, c)\n\n def _increment_all_levels(self):\n for r in range(0, self._nrows):\n for c in range(0, self._ncols):\n self._increment_cell_level(r, c)\n\n def _reset_energy_levels(self):\n cnt = 0\n for r in range(0, self._nrows):\n for c in range(0, self._ncols):\n if self._grid[r][c] > 9:\n self._grid[r][c] = 0\n cnt += 1\n return cnt\n\n def execute_steps(self, num):\n resets = 0\n for i in range(0, num):\n self._increment_all_levels()\n resets += self._reset_energy_levels()\n return resets\n\n def execute_until_all_flash(self):\n n = 1\n ncells = self._nrows * self._ncols\n while True:\n self._increment_all_levels()\n if self._reset_energy_levels() == ncells:\n return n\n n += 1\n\n\ndef do_part1(raw_data):\n grid = Grid(raw_data)\n return grid.execute_steps(100)\n\n\ndef do_part2(raw_data):\n grid = Grid(raw_data)\n return grid.execute_until_all_flash()\n\n\ndef execute():\n raw_data = read_data('input.txt')\n print('Part 1 answer:', do_part1(raw_data))\n print('Part 2 answer:', do_part2(raw_data))\n\n\nif __name__ == '__main__':\n execute()\n","repo_name":"trolen/advent-of-code","sub_path":"2021/day11/day11.py","file_name":"day11.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"13701730328","text":"#encoding-utf8\nimport cv2\nimport numpy as np\n\nimg = cv2.imread('foto.jpg',1)\nimgGray = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)\n\n# IMAGEM NEGATIVA\n\nimg_not = cv2.bitwise_not(imgGray)\nres1 = np.hstack((imgGray,img_not)) #stacking images side-by-side\n\ncv2.imwrite('resultados/negativa.jpg',img_not)\n\n\n\n# NORMALIZAÇÃO DA IMAGEM\n\nequ = cv2.equalizeHist(imgGray)\nres2 = np.hstack((imgGray,equ)) #stacking images 
side-by-side\ncv2.imwrite('resultados/normalizacao.jpg',res2)\n\ncv2.imshow(\"Negativa\",res1)\ncv2.imshow(\"Normalização\",res2)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()","repo_name":"jessicahelem/Tecnicas_de_realce_opecv_python","sub_path":"tecnicas-de-realce/tecnicas_de_realce_aula_7.py","file_name":"tecnicas_de_realce_aula_7.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"21474385716","text":"\"\"\"\r\n1) Write a normal function that accepts another function as an\r\n argument. Output that other function in your \"normal\" function.\r\n\r\n2) Call your \"normal\" function by passing a lambda function – which\r\n performs any operation of your choice – as an argument.\r\n\r\n3) Tweak your normal function by allowing an infinite amount of\r\n arguments on which your lambda function will be executed. \r\n\r\n4) Format the output of your \"normal\" function such that numbers look\r\n nice and are centered in a 20 character column.\r\n\"\"\"\r\n\r\n\r\ndef normal_fn(other_fn, *args):\r\n print(other_fn)\r\n return [other_fn(arg) for arg in args]\r\n\r\n\r\nhalfed_numbers = normal_fn(lambda arg: arg / 2, 2.5, 4.9, 10, 12.3, 22.7)\r\nfor num in halfed_numbers:\r\n print(f'\"{num:^20.1f}\"')\r\n","repo_name":"TranXuanHoang/Python","sub_path":"00-basic/exercise4.py","file_name":"exercise4.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"20465853773","text":"import pandas as pd\nimport math\nimport datetime\nimport csv\nimport sqlite3\nimport time\n\n\nclass logLoader:\n def __init__(self, dxd, isco, vindum, vindumNMR, EOR):\n self.dxd = dxd\n self.isco = isco\n self.vindum = vindum\n self.vindumNMR = vindumNMR\n self.EOR = EOR\n\n def test(self):\n return(\"hello world\")\n\n def dxdLoader(self):\n df_dxd = pd.read_csv(self.dxd, skiprows=7)\n df_dxd.drop(df_dxd.columns[[3, 4, 6, 7, 9, 10, 12, 13, 15, 16,\n 18, 19, 21, 22, 24, 25, 27, 28, 30, 31]], axis=1, inplace=True)\n \n df_dxd.columns = ['Date', 'Time', 'Ext3Up', 'Ext3Down', 'Ext4Up',\n 'Ext4Down', 'SS1Up', 'SS1Down', 'SS2Up', 'SS2Down', 'DeadulusDown', 'DeadulusUp']\n df_dxd['DateTime'] = pd.to_datetime(df_dxd['Date'] + \" \" + df_dxd['Time'])\n df_dxd['DateTime'] = df_dxd['DateTime'].dt.round('30s') \n df_dxd = df_dxd.dropna()\n df_dxd = df_dxd.sort_values(by='DateTime')\n df_dxd.drop(df_dxd.columns[[0,1]], axis=1, inplace=True)\n conn = sqlite3.connect('logs.db')\n c = conn.cursor()\n c.execute('CREATE TABLE IF NOT EXISTS DXD (Ext3Up, Ext3Down, Ext4Up, Ext4Down, SS1Up, SS1Down, SS2Up, SS2Down, DeadulusDown, DeadulusUp, DateTime)')\n df_temp = pd.read_sql_query(\"SELECT * from dxd\", conn)\n df_dxd = pd.concat([df_dxd, df_temp])\n df_dxd['DateTime'] = pd.to_datetime(df_dxd['DateTime'] ,errors='coerce')\n df_dxd = df_dxd.sort_values(by='DateTime')\n df_dxd = df_dxd.drop_duplicates(subset='DateTime', keep=\"first\")\n df_dxd.to_sql('DXD', conn, if_exists='replace', index = False)\n\n def iscoLoader(self):\n df_isco = pd.read_csv(self.isco)\n df_isco.columns = df_isco.columns.str.replace('/', '')\n df_isco['DateTime'] = df_isco['DateTime'].str.replace('=', '')\n df_isco['DateTime'] = df_isco['DateTime'].str.replace('\"', '')\n df_isco[pd.to_numeric(df_isco['DateTime'], errors='coerce').notnull()]\n isco_list = [\"Pressure AB\", \"Flow Rate AB\", \"DateTime\"]\n df_isco = df_isco[isco_list]\n df_isco['DateTime'] = 
pd.to_datetime(df_isco['DateTime'], errors='coerce') \n df_isco.columns = ['ISCOPres', 'ISCORate', 'DateTime']\n df_isco['DateTime'] = pd.to_datetime(df_isco['DateTime'])\n df_isco['DateTime'] = df_isco['DateTime'].dt.round('30s') \n conn = sqlite3.connect('logs.db')\n c = conn.cursor()\n c.execute('CREATE TABLE IF NOT EXISTS ISCO (ISCOPres INTEGER, ISCORate INTEGER, DateTime TIMESTAMP)')\n df_temp = pd.read_sql_query(\"SELECT * from isco\", conn)\n df_isco = pd.concat([df_isco, df_temp])\n df_isco['DateTime'] = pd.to_datetime(df_isco['DateTime'] ,errors='coerce')\n df_isco = df_isco.sort_values(by='DateTime')\n df_isco = df_isco.drop_duplicates(subset='DateTime', keep=\"first\")\n df_isco.to_sql('ISCO', conn, if_exists='replace', index = False)\n\n def vindumLoader(self):\n df_vin = pd.read_csv(self.vindum, index_col=False)\n mainlist = ['Date', 'Time', 'P1 Press', 'P1 Rate', 'P2 Press',\n 'P2 Rate', 'P3 Press', 'P3 Rate', 'P4 Press', 'P4 Rate']\n df_vin = df_vin[[c for c in df_vin.columns if c in mainlist]]\n df_vin = df_vin[:-1]\n df_vin['DateTime'] = pd.to_datetime(df_vin['Date'] + \" \" + df_vin['Time'])\n df_vin['DateTime'] = pd.to_datetime(df_vin['DateTime'])\n df_vin['DateTime'] = df_vin['DateTime'].dt.round('30s')\n df_vin = df_vin.drop(['Date', 'Time'], axis=1)\n df_vin.columns = ['P1Pres', 'P1Rate', 'P2Pres','P2Rate', 'P3Pres', 'P3Rate', 'P4Pres', 'P4Rate', 'DateTime']\n df_vin = df_vin.dropna()\n df_vin = df_vin.sort_values(by='DateTime')\n conn = sqlite3.connect('logs.db')\n c = conn.cursor()\n c.execute('CREATE TABLE IF NOT EXISTS VIN (P1Pres INTEGER, P1Rate INTEGER, P2Pres INTEGER, P2Rate INTEGER, P3Pres INTEGER, P3Rate INTEGER, P4Pres INTEGER, P4Rate INTEGER, DateTime TIMESTAMP)')\n df_temp = pd.read_sql_query(\"SELECT * from vin\", conn)\n df_vin = pd.concat([df_vin, df_temp])\n df_vin['DateTime'] = pd.to_datetime(df_vin['DateTime'] ,errors='coerce')\n df_vin = df_vin.sort_values(by='DateTime')\n df_vin = df_vin.drop_duplicates(subset='DateTime', keep=\"first\")\n df_vin.to_sql('VIN', conn, if_exists='replace', index = False)\n\n def vindumnmrLoader(self):\n df_vinnmr = pd.read_csv(self.vindumNMR)\n df_vinnmr = df_vinnmr.loc[:,['Date', 'Time', 'P1 Press', 'P1 Rate']]\n df_vinnmr.columns = ['Date', 'Time', 'P1NMRPres', 'P1NMRRate']\n df_vinnmr['DateTime'] = pd.to_datetime(df_vinnmr['Date'] + \" \" + df_vinnmr['Time'])\n df_vinnmr['DateTime'] = pd.to_datetime(df_vinnmr['DateTime'])\n df_vinnmr['DateTime'] = df_vinnmr['DateTime'].dt.round('30s') \n df_vinnmr = df_vinnmr.drop(['Date', 'Time'], axis=1) \n df_vinnmr = df_vinnmr.dropna()\n df_vinnmr = df_vinnmr.sort_values(by='DateTime')\n conn = sqlite3.connect('logs.db')\n c = conn.cursor()\n c.execute('CREATE TABLE IF NOT EXISTS VINNMR (P1NMRPres INTEGER, P1NMRRate INTEGER, DateTime TIMESTAMP)')\n df_temp = pd.read_sql_query(\"SELECT * from VINNMR\", conn)\n df_vinnmr = pd.concat([df_vinnmr, df_temp])\n df_vinnmr['DateTime'] = pd.to_datetime(df_vinnmr['DateTime'] ,errors='coerce')\n df_vinnmr = df_vinnmr.sort_values(by='DateTime')\n df_vinnmr = df_vinnmr.drop_duplicates(subset='DateTime', keep=\"first\")\n df_vinnmr.to_sql('VINNMR', conn, if_exists='replace', index = False)\n\n def EORLoader(self):\n df_eor = pd.read_csv(self.EOR, skiprows=46,encoding='latin1', header = None)\n df_eor = df_eor.drop(df_eor.columns[[1, 3, 4, 5, 6, 7, 8, 9, 12, 13 ,16 ,17, 19, 21, 22, 23, 24,\n 25, 26, 27 ,28, 29, 30, 34, 36, 37, 38, 39, 40, 41, 42, \n 43, 44, 45]], axis=1)\n df_eor.columns = ['DateTime', 'EORPConf', 'P1_injV', 'P1_injQ', 
'P2_injV', 'P2_ingQ', 'EORUP',\n 'EORDOWN', 'EORVol', 'EORRate', 'EORDP', 'EORHES']\n df_eor = df_eor.set_index('DateTime')\n df_eor.index = pd.to_datetime(df_eor.index)\n df_eor = df_eor.resample('30s').pad()\n df_eor = df_eor.reset_index()\n df_eor = df_eor.dropna()\n df_eor = df_eor.sort_values(by='DateTime')\n conn = sqlite3.connect('logs.db')\n c = conn.cursor()\n c.execute('CREATE TABLE IF NOT EXISTS EOR (DateTime TIMESTAMP, EORPConf INTEGER, P1_injV INTEGER, P1_injQ INTEGER, P2_injV INTEGER, P2_ingQ INTEGER, EORUP INTEGER, EORDOWN INTEGER, EORVol INTEGER, EORRate INTEGER, EORDP INTEGER, EORHES INTEGER)')\n df_temp = pd.read_sql_query(\"SELECT * from EOR\", conn)\n df_eor = pd.concat([df_eor, df_temp])\n df_eor['DateTime'] = pd.to_datetime(df_eor['DateTime'] ,errors='coerce')\n df_eor = df_eor.sort_values(by='DateTime')\n df_eor = df_eor.drop_duplicates(subset='DateTime', keep=\"first\")\n df_eor.to_sql('EOR', conn, if_exists='replace', index = False)\n\n def combined(self):\n #merge logs\n conn = sqlite3.connect('logs.db')\n df_dxd_temp = pd.read_sql_query(\"SELECT * from dxd\", conn)\n df_dxd_temp['DateTime'] = pd.to_datetime(df_dxd_temp['DateTime'] ,errors='coerce')\n df_vin_temp = pd.read_sql_query(\"SELECT * from vin\", conn)\n df_vin_temp['DateTime'] = pd.to_datetime(df_vin_temp['DateTime'] ,errors='coerce')\n df_vinnmr_temp = pd.read_sql_query(\"SELECT * from vinnmr\", conn)\n df_vinnmr_temp['DateTime'] = pd.to_datetime(df_vinnmr_temp['DateTime'] ,errors='coerce')\n df_isco_temp = pd.read_sql_query(\"SELECT * from isco\", conn)\n df_isco_temp['DateTime'] = pd.to_datetime(df_isco_temp['DateTime'] ,errors='coerce')\n df_eor_temp = pd.read_sql_query(\"SELECT * from EOR\", conn)\n df_eor_temp['DateTime'] = pd.to_datetime(df_eor_temp['DateTime'] ,errors='coerce')\n df_com = pd.merge_asof(df_dxd_temp, df_vin_temp, on='DateTime')\n df_com = pd.merge_asof(df_com, df_vinnmr_temp, on='DateTime')\n df_com = pd.merge_asof(df_com, df_isco_temp, on='DateTime')\n df_com.to_sql('COMBINED', conn, if_exists='replace', index = False)\n df_com_eor = pd.merge_asof(df_eor_temp, df_isco_temp, on='DateTime')\n df_com_eor.to_sql('COMBINED_EOR', conn, if_exists='replace', index = False)\n\ndxd = r\"M:\\DXD Log Files\\DXD_Log_4_14_119pm.csv\"\nisco = r\"M:\\DXD Log Files\\ISCO_Log_4_24_835am.csv\"\nvindum = r\"M:\\DXD Log Files\\VindumPumpLog (Pump1-4) 4-14 115pm.csv\"\nvindumNMR = r\"M:\\VindumPumpLog_NMR Lperm.csv\"\nsamplesheet = r\"M:\\Team Chaos Liquid Perm Initialization v2_1.xlsx\"\neor = r\"M:\\live oil 4_28_2020 sep gas\"\nx = logLoader(dxd, isco, vindum, vindumNMR, eor)\nstart = time.time()\nx.dxdLoader()\nprint('dxd complete')\nx.iscoLoader()\nprint('isco complete')\nx.vindumLoader()\nprint('vindum complete')\nx.vindumnmrLoader()\nprint('vindumnmr complete')\nx.EORLoader()\nprint('eor complete')\n\nx.combined()\nprint('combine complete')\nend = time.time()\nprint(end - start)\n\n# schedule.every(5).minutes.do(x.dxdLoader())\n# schedule.every(5).minutes.do(x.vindumLoader())\n# schedule.every(5).minutes.do(x.iscoLoader())\n# schedule.every(5).minutes.do(x.vindumnmrLoader())\n# schedule.every(5).minutes.do(x.EORLoader())\n# schedule.every(5).minutes.do(x.combined())\n# while 1:\n# schedule.run_pending()\n# time.sleep(1)","repo_name":"brianychin/lpermplotter","sub_path":"testdb.py","file_name":"testdb.py","file_ext":"py","file_size_in_byte":9569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"11293709669","text":"\n# A Python 3 
utility to extract the commands indicated by the first section\n# of a test script (input to test-qirkat.py).\nimport sys, re\n\ndef match(patn, text):\n global Match\n Match = re.match(patn, text)\n return Match\n\ndef group(k):\n return Match.group(k)\n\nif len(sys.argv) <= 1:\n inp = sys.stdin\nelse:\n inp = open(sys.argv[1])\n\nwhile True:\n if not match(r'#|\\s*$', inp.readline()):\n break\n\nfor line in inp:\n if match(r'-{10}', line):\n break\n if not match(r'#|\\s*@', line):\n print(line, end='')\n","repo_name":"gabywang/cs61b","sub_path":"proj2/testing/script-to-input.py","file_name":"script-to-input.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"10250035392","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 2 17:38:06 2021\n\n@author: Fazuximy\n\"\"\"\n\nimport pandas as pd\n\n# --- Day 2: Dive! ---\n# Now, you need to figure out how to pilot this thing.\n\n# It seems like the submarine can take a series of commands like forward 1, down 2, or up 3:\n\n# forward X increases the horizontal position by X units.\n# down X increases the depth by X units.\n# up X decreases the depth by X units.\n# Note that since you're on a submarine, down and up affect your depth, and so they have the opposite result of what you might expect.\n\n# The submarine seems to already have a planned course (your puzzle input). You should probably figure out where it's going. For example:\n\n# forward 5\n# down 5\n# forward 8\n# up 3\n# down 8\n# forward 2\n\n# Your horizontal position and depth both start at 0. The steps above would then modify them as follows:\n\n# forward 5 adds 5 to your horizontal position, a total of 5.\n# down 5 adds 5 to your depth, resulting in a value of 5.\n# forward 8 adds 8 to your horizontal position, a total of 13.\n# up 3 decreases your depth by 3, resulting in a value of 2.\n# down 8 adds 8 to your depth, resulting in a value of 10.\n# forward 2 adds 2 to your horizontal position, a total of 15.\n# After following these instructions, you would have a horizontal position of 15 and a depth of 10. (Multiplying these together produces 150.)\n\n# Calculate the horizontal position and depth you would have after following the planned course. 
What do you get if you multiply your final horizontal position by your final depth?\n\npath = r\"Z:\\python_stuff\\advent_of_code_2021\\2\\\\\"\nfile = \"input.XSCORE.txt\"\n\n# Importing the text file\ncommand_file = open(path+file, \"r\")\ncommands = command_file.read()\n# Splitting the text file into rows\ncommand_list = commands.split(\"\\n\")\ncommand_file.close()\n\n# Splitting the data file into columns\ncommand_list = [i.split(\" \") for i in command_list]\n# Creating a dataframe for the commands\ncommand_df = pd.DataFrame(command_list[:-1])\ncommand_df.columns = [\"command\",\"number_of_units\"]\n# Converting string numbers to integers\ncommand_df[\"number_of_units\"] = [int(i) for i in command_df[\"number_of_units\"]]\n\n# Creating a for loop to perform commands\ndepth = 0\nhorizontal_position = 0\nfor i in range(0,len(command_df[\"command\"])):\n if command_df.loc[i,\"command\"] == \"forward\":\n horizontal_position = horizontal_position + command_df.loc[i,\"number_of_units\"]\n elif command_df.loc[i,\"command\"] == \"down\":\n depth = depth + command_df.loc[i,\"number_of_units\"]\n elif command_df.loc[i,\"command\"] == \"up\":\n depth = depth - command_df.loc[i,\"number_of_units\"]\n \nprint(\"The Answer to Day 2 Part One: \\nThe product of the final horizontal position and the final depth is: {}\".format((horizontal_position * depth)))\n\n\n# --- Part Two ---\n# Based on your calculations, the planned course doesn't seem to make any sense. You find the submarine manual and discover that the process is actually slightly more complicated.\n\n# In addition to horizontal position and depth, you'll also need to track a third value, aim, which also starts at 0. The commands also mean something entirely different than you first thought:\n\n# down X increases your aim by X units.\n# up X decreases your aim by X units.\n# forward X does two things:\n# It increases your horizontal position by X units.\n# It increases your depth by your aim multiplied by X.\n# Again note that since you're on a submarine, down and up do the opposite of what you might expect: \"down\" means aiming in the positive direction.\n\n# Now, the above example does something different:\n\n# forward 5 adds 5 to your horizontal position, a total of 5. Because your aim is 0, your depth does not change.\n# down 5 adds 5 to your aim, resulting in a value of 5.\n# forward 8 adds 8 to your horizontal position, a total of 13. Because your aim is 5, your depth increases by 8*5=40.\n# up 3 decreases your aim by 3, resulting in a value of 2.\n# down 8 adds 8 to your aim, resulting in a value of 10.\n# forward 2 adds 2 to your horizontal position, a total of 15. Because your aim is 10, your depth increases by 2*10=20 to a total of 60.\n# After following these new instructions, you would have a horizontal position of 15 and a depth of 60. (Multiplying these produces 900.)\n\n# Using this new interpretation of the commands, calculate the horizontal position and depth you would have after following the planned course. 
What do you get if you multiply your final horizontal position by your final depth?\n\n# Creating a for loop for performing the new interpretation of the commands\nnew_depth = 0\nnew_horizontal_position = 0\naim = 0\nfor i in range(0,len(command_df[\"command\"])):\n if command_df.loc[i,\"command\"] == \"forward\":\n new_horizontal_position = new_horizontal_position + command_df.loc[i,\"number_of_units\"]\n new_depth = new_depth + aim * command_df.loc[i,\"number_of_units\"]\n elif command_df.loc[i,\"command\"] == \"down\":\n aim = aim + command_df.loc[i,\"number_of_units\"]\n elif command_df.loc[i,\"command\"] == \"up\":\n aim = aim - command_df.loc[i,\"number_of_units\"]\n \nprint(\"The Answer to Day 2 Part Two: \\nThe product of the planned course final horizontal position and the planned course final depth is: {}\".format((new_horizontal_position * new_depth)))\n\n\n\n\n","repo_name":"fazuximy/advent_of_code_2021","sub_path":"days/2/day_2.py","file_name":"day_2.py","file_ext":"py","file_size_in_byte":5317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"}
+{"seq_id":"17943728608","text":"def symet(li, m):\n for i in range(m):\n for j in range(m):\n if (i!=j and li[i][j]!=li[j][i]):\n return 0\n return 1\nn=int(input(\"please enter the size\"))\nliste=[ [int(input(\"enter a value\"))for j in range(n)] for i in range(n)]\nR=symet(liste,n)\nif R==1:\n print(\"the matrix is symmetric\")\nelse:\n print(\"the matrix is not symmetric\")","repo_name":"anassadouq/Python-Exercices","sub_path":"fct symétrique listes.py","file_name":"fct symétrique listes.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"}
+{"seq_id":"3114280467","text":"from filter_plc import filter_plc\nfrom disinfection_plc import DisinfectionPLC\nfrom parameter import Parameter\nfrom gadget import Gadget\nfrom threading import Thread\n\nfrom FloatModbusServer import FloatModbusServer\nfrom time import sleep\n\n\ndef get_basic_plc_parameters():\n water_level = Parameter(60.00, 50.00, 90.00, 0, 105.0, 5)\n temperature = Parameter(18.0, 12.0, 26.0, 8.0, 3.0, 10)\n\n return water_level, temperature\n\nwater_level, temperature = get_basic_plc_parameters()\n\nturbidity = Parameter(0.6, 0.3, 1.25, 0.0, 200.0, 1)\ndissolved_solids = Parameter(150, 0, 550.0, 0, 10000, 30)\ngravel_filter = Gadget(70, 2000)\nsand_filter = Gadget(40, 1000)\n\nf_plc = filter_plc(water_level, temperature,\n max_water_level_last_tank=150,\n water_level_last_tank=70,\n turbidity=turbidity,\n dissolved_solids=dissolved_solids,\n gravel_filter=gravel_filter,\n sand_filter=sand_filter)\n\n\ndef filter_plc_switch():\n filter_running = server.data_bank.get_holding_registers(12)[0]\n gravel_running = server.data_bank.get_holding_registers(13)[0]\n sand_running = server.data_bank.get_holding_registers(14)[0]\n flow_in_valve = server.data_bank.get_holding_registers(15)[0]\n flow_out_valve = server.data_bank.get_holding_registers(16)[0]\n\n while True:\n current_filter_running = server.data_bank.get_holding_registers(12)[0]\n current_gravel_running = server.data_bank.get_holding_registers(13)[0]\n current_sand_running = server.data_bank.get_holding_registers(14)[0]\n current_flow_in_valve = server.data_bank.get_holding_registers(15)[0]\n current_flow_out_valve = server.data_bank.get_holding_registers(16)[0]\n\n if current_flow_in_valve != flow_in_valve:\n flow_in_valve = current_flow_in_valve\n f_plc.switch_intake_valve()\n\n 
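# The remaining checks repeat the same change-detection pattern: poll the\n # holding register, compare it against the cached value, and trigger the\n # matching f_plc switch only when the value has changed.\n 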
if current_flow_out_valve != flow_out_valve:\n flow_out_valve = current_flow_out_valve\n f_plc.switch_outlet_valve()\n\n if current_filter_running != filter_running:\n filter_running = current_filter_running\n f_plc.switch_on_off()\n\n if current_gravel_running != gravel_running:\n gravel_running = current_gravel_running\n f_plc.switch_gravel_filter(gravel_running)\n\n if current_sand_running != sand_running:\n sand_running = current_sand_running\n f_plc.switch_sand_filter(sand_running)\n\n\nif __name__ == '__main__':\n server = FloatModbusServer(\"127.0.0.1\", 12345, no_block=True)\n server.start()\n # filter running\n server.data_bank.set_holding_registers(12, [1])\n # gravel running\n server.data_bank.set_holding_registers(13, [1])\n # sand running\n server.data_bank.set_holding_registers(14, [1])\n # take in valve running\n server.data_bank.set_holding_registers(15, [1])\n # take out valve running\n server.data_bank.set_holding_registers(16, [1])\n\n server.write_float(99, 70)\n\n switch_thread = Thread(target=filter_plc_switch, daemon=True)\n switch_thread.start()\n\n\n while True:\n f_plc.dynamic_change()\n value_list = [f_plc.get_water_level(),\n f_plc.get_temperature(),\n f_plc.get_turbidity(),\n f_plc.get_dissolved_solids(),\n f_plc.get_gravel_filter_efficiency(),\n f_plc.get_sand_filter_efficiency(),\n ]\n server.write_floats(0, value_list)\n\n value_list.append(f_plc.is_running)\n value_list.append(f_plc.get_gravel_filter_mode())\n value_list.append(f_plc.get_sand_filter_mode())\n value_list.append(f_plc.intake)\n value_list.append(f_plc.outlet)\n\n water_level_last_tank = server.read_float(99)\n f_plc.set_water_level_last_tank(water_level_last_tank)\n\n print(value_list)\n sleep(1)\n","repo_name":"MasonSyj/SummerProject","sub_path":"code/core/main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":3940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"42084740774","text":"import tensorflow as tf\nimport numpy as np\nfrom scipy import misc\nfrom glob import glob\nimport matplotlib.pyplot as plt\n\n\ndef get_image_size(config):\n im_size = misc.imread(\n glob(config.train_directory + '*' + config.im_ext)[0]).shape\n if len(im_size) == 2:\n im_size = np.hstack((im_size, 3))\n return im_size\n\n\ndef repeat_elements(x, rep, axis):\n '''Repeats the elements of a tensor along an axis, like np.repeat\n If x has shape (s1, s2, s3) and axis=1, the output\n will have shape (s1, s2 * rep, s3)\n This function is taken from keras backend\n '''\n x_shape = x.get_shape().as_list()\n splits = tf.split(axis, x_shape[axis], x)\n x_rep = [s for s in splits for i in range(rep)]\n return tf.concat(axis, x_rep)\n\n\ndef repeat_reshape_2d(\n image, im_size, num_channels, tf_dtype=tf.float32,\n img_mean_value=None):\n res_image = tf.reshape(image, np.asarray(im_size)[:num_channels])\n image = tf.cast(repeat_elements(tf.expand_dims(\n res_image, 2), 3, axis=2), tf_dtype)\n if img_mean_value is not None:\n image -= img_mean_value\n return image\n\n\ndef clip_to_value(data, low, high, val, tf_dtype=tf.float32):\n hmask = tf.cast(tf.greater(data, high), tf_dtype)\n lmask = tf.cast(tf.less(data, low), tf_dtype)\n bmask = tf.cast(tf.equal(hmask + lmask, False), tf_dtype)\n return data * bmask\n\n\ndef read_and_decode_single_example(\n filename, im_size, model_input_shape, train,\n img_mean_value=None, feat_mean_value=None, num_channels=2, img_mean_file=None):\n \"\"\"first construct a queue containing a list of filenames.\n this lets 
a user split up there dataset in multiple files to keep\n size down\"\"\"\n filename_queue = tf.train.string_input_producer([filename],\n num_epochs=None)\n # Unlike the TFRecordWriter, the TFRecordReader is symbolic\n reader = tf.TFRecordReader()\n # One can read a single serialized example from a filename\n # serialized_example is a Tensor of type string.\n _, serialized_example = reader.read(filename_queue)\n # The serialized example is converted back to actual values.\n # One needs to describe the format of the objects to be returned\n features = tf.parse_single_example(\n serialized_example,\n features={\n 'label': tf.FixedLenFeature([], tf.string),\n 'image': tf.FixedLenFeature([], tf.string)\n }\n )\n\n\n label = tf.decode_raw(features['label'], tf.uint8)\n image = tf.decode_raw(features['image'], tf.uint8)\n\n raw_im_shape = [480, 640]\n\n label = tf.reshape(label, np.asarray([112, 112]))\n image = tf.reshape(image, np.asarray([224, 224, 3]))\n\n return label, tf.cast(image, tf.uint8)\n\n # Process features specially\n\n # To support augmentations we have to convert data to 3D\n if num_channels == 2:\n image = repeat_reshape_2d(\n image, im_size, num_channels, img_mean_value=img_mean_value)\n else:\n # Need to reconstruct channels first then transpose channels\n # import pdb; pdb.set_trace()\n res_image = tf.reshape(image, np.asarray(im_size)[[2, 0, 1]])\n if img_mean_value is not None:\n res_image -= img_mean_value\n image = tf.transpose(res_image, [2, 1, 0])\n\n # Insert augmentation and preprocessing here\n\n # And finally handle the labels\n label = tf.reshape(label, np.asarray(im_size[0:2]))\n\n # Set means\n if img_mean_value is None:\n img_mean_value = 0\n elif img_mean_value == 'within':\n img_mean_value = tf.reduce_mean(image)\n return label, image\n\n\ndef read_and_decode(\n filename_queue, im_size, model_input_shape,\n train, img_mean_value=[None],\n num_channels=3, img_mean_file=None):\n reader = tf.TFRecordReader()\n _, serialized_example = reader.read(filename_queue)\n features = tf.parse_single_example(\n serialized_example,\n features={\n 'label': tf.FixedLenFeature([], tf.string),\n 'image': tf.FixedLenFeature([], tf.string),\n }\n )\n\n # Convert from a scalar string tensor (whose single string has\n # import pdb; pdb.set_trace()\n label = tf.decode_raw(features['label'], tf.uint8)\n label_float = tf.decode_raw(features['label'], tf.float64)\n image = tf.decode_raw(features['image'], tf.int8)\n label = tf.reshape(label, np.asarray([112, 112]))\n image = tf.reshape(image, np.asarray([224, 224, 3]))\n\n orig_image = image\n\n minl = tf.reduce_min(label)\n label = tf.cast(label, tf.float32) - tf.cast(minl, tf.float32)\n maxl = tf.reduce_max(label)\n label = tf.div(tf.cast(label, tf.float32),tf.cast(maxl, tf.float32))\n\n # if img_mean_value != None:\n # # import pdb; pdb.set_trace()\n # mean = np.load(img_mean_value)['im_list'].astype(int)\n\n # image = tf.cast(image, tf.int64) - tf.convert_to_tensor(mean)\n\n # if train is not None:\n \n # image = augment_data(image, model_input_shape, im_size, ['up_down'])\n # label = tf.expand_dims(label, dim=2)\n # label = augment_data(label, [112, 112], im_size, ['up_down'])\n # label = tf.squeeze(label)\n\n return label, tf.cast(image, tf.uint8)\n\n\ndef augment_data(image, model_input_shape, im_size, train):\n if train is not None:\n if 'left_right' in train:\n image = tf.image.flip_left_right(image)\n if 'up_down' in train:\n image = tf.image.flip_up_down(image)\n if 'random_contrast' in train:\n image = 
tf.image.random_contrast(image, lower=0.5, upper=1.5)\n if 'random_brightness' in train:\n image = tf.image.random_brightness(image, max_delta=32./255.)\n if 'rotate' in train:\n image = tf.image.rot90(image, k=np.random.randint(4))\n if 'random_crop' in train:\n image = tf.random_crop(\n image,\n [model_input_shape[0], model_input_shape[1], im_size[2]])\n else:\n image = tf.image.resize_image_with_crop_or_pad(\n image, model_input_shape[0], model_input_shape[1])\n else:\n image = tf.image.resize_image_with_crop_or_pad(\n image, model_input_shape[0], model_input_shape[1])\n return image\n\n\ndef inputs(\n tfrecord_file, batch_size, im_size, model_input_shape,\n train=None, num_epochs=None, use_features=False,\n img_mean_value=None):\n\n with tf.name_scope('input'):\n filename_queue = tf.train.string_input_producer(\n [tfrecord_file], num_epochs=num_epochs)\n\n # Even when reading in multiple threads, share the filename\n # queue.\n label, image = read_and_decode(\n filename_queue=filename_queue,\n im_size=im_size,\n model_input_shape=model_input_shape,\n train=None,\n img_mean_value=img_mean_value)\n\n # Shuffle the examples and collect them into batch_size batches.\n # (Internally uses a RandomShuffleQueue.)\n # We run this in two threads to avoid being a bottleneck.\n if use_features:\n input_data = feat\n else:\n input_data = image\n\n data, labels = tf.train.shuffle_batch(\n [input_data, label], batch_size=batch_size, num_threads=2,\n capacity=1000 + 3 * batch_size, min_after_dequeue=1000)\n # allow_smaller_final_batch=True)\n\n # Finally, have to reshape label -> 1d matrix\n label_size = [112, 112]\n labels = tf.reshape(labels, [batch_size, np.prod(np.asarray(label_size))])\n return data, labels\n","repo_name":"andrewcharlesjones/deepgaze","sub_path":"ops/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":7676,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"12"} +{"seq_id":"34070305333","text":"import numpy as np\nimport itertools\n\n# Manually insert the coordinates of the shapes\ndef get_coordinates_of_shapes():\n\tshape_coordinates = {}\n\tshape_coordinates['s1'] = [[0,0,2], [0,12,10]]\n\tshape_coordinates['s2'] = [[0,2,4], [0,10,8]]\n\tshape_coordinates['s3'] = [[0,2,3], [0,4,0]]\n\tshape_coordinates['s4'] = [[2,3,3], [4,6,0]]\n\tshape_coordinates['s5'] = [[0,4,6], [12,8,12]]\n\tshape_coordinates['s6'] = [[3,3,4,6,6], [0,6,8,6,0]]\n\tshape_coordinates['s7'] = [[4,6,6], [8,12,6]]\n\tshape_coordinates['s8'] = [[6,8,12], [0,4,0]]\n\tshape_coordinates['s9'] = [[6,6,9,8], [6,12,6,4]]\n\tshape_coordinates['s10'] = [[8,9,12], [4,6,0]]\n\tshape_coordinates['s11'] = [[6,6,8], [0,6,4]]\n\tshape_coordinates['s12'] = [[6,12,12,9], [12,12,8,6]]\n\tshape_coordinates['s13'] = [[9,12,12], [6,8,6]]\n\tshape_coordinates['s14'] = [[9,12,12], [6,6,0]]\n\n\treturn shape_coordinates\n\n\n# Simple implementation of the shoelace formula to calculate area based on x-y coordinates\ndef calculate_area_of_shape(x, y):\n\treturn 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))\n\n\n# Calculate areas of all shapes\ndef calculate_area_of_shapes():\n\tshape_coordinates = get_coordinates_of_shapes()\n\tshape_areas = []\n\tfor key, coords in shape_coordinates.items():\n\t\tshape_areas.append(calculate_area_of_shape(coords[0], coords[1]))\n\n\treturn shape_areas\n\n\ndef partition_by_sum(input_list, target):\n\toutput_list = []\n\n\t# Get all possible combinations of numbers that sum to the target\n\tsum_sets = [list(seq) 
for i in range(len(input_list), 0, -1) for seq in itertools.combinations(input_list, i) if sum(seq) == target]\n\n\t# Sort every set of numbers\n\tfor i in range(len(sum_sets)):\n\t\tsum_sets[i].sort()\n\n\t# Create a new list that has no duplicates; will account for permutations\n\t# at the end\n\tsum_sets_unique = []\n\tfor x in sum_sets:\n\t\tif x not in sum_sets_unique:\n\t\t\tsum_sets_unique.append(x)\n\n\t# Find all of the pairs that combine to make the original list\n\tfor i, set1 in enumerate(sum_sets_unique):\n\t\tfor j, set2 in enumerate(sum_sets_unique):\n\t\t\tcomb = set1 + set2\n\t\t\tcomb.sort()\n\t\t\tif set1 != set2 and comb == input_list:# and [set1, set2] not in output_list and [set2, set1] not in output_list:\n\t\t\t\toutput_list.append([set1, set2])\n\n\treturn output_list\n\n\ndef main():\n\t# Load in the areas of each shape\n\tshape_areas = [int(x) for x in calculate_area_of_shapes()]\n\n\t# Sort shape areas\n\tshape_areas.sort()\n\n\t# Get sum of all areas and the target area for each color\n\tarray = np.asarray(shape_areas, dtype=np.int)\n\ttotal_area = array.sum()\n\tquadrant_area = total_area / 4\n\n\t# Split the shapes as if there are only 2 colors, target area of half the total area\n\t# for each color; does not include permutation of shapes with same area across sets\n\ttwo_color_partition_sets = partition_by_sum(shape_areas, quadrant_area*2)\n\n\t# For each pair of sets that results from the first partition, partition each set in\n\t# each pair again to get 4 colors\n\tfour_color_partition_sets = []\n\tfor pair_set in two_color_partition_sets:\n\t\tpartition_set1 = partition_by_sum(pair_set[0], quadrant_area)\n\t\tpartition_set2 = partition_by_sum(pair_set[1], quadrant_area)\n\t\tfour_color_partition_sets.append([partition_set1, partition_set2])\n\t\tprint([partition_set1, partition_set2])\n\n\tprint(len(four_color_partition_sets))\n\n\nif __name__ == \"__main__\":\n\tmain()","repo_name":"adjordan/fivethirtyeight-riddler-solutions","sub_path":"01-26-2018/classic.py","file_name":"classic.py","file_ext":"py","file_size_in_byte":3301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"7231077897","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\nimport pymysql\nfrom pymysql import Error as DBError\n\nfrom ChinaQualityNews.settings import DB_SETTINGS\n\n\nclass ChinaqualitynewsPipeline(object):\n def __init__(self):\n self._host = DB_SETTINGS.get('host')\n self._port = DB_SETTINGS.get('port')\n self._user = DB_SETTINGS.get('user')\n self._password = DB_SETTINGS.get('password')\n self._db_name = DB_SETTINGS.get('db')\n self.db = pymysql.connect(host=self._host,\n user=self._user,\n password=self._password,\n port=self._port,\n db=self._db_name)\n self.db.set_charset('utf8')\n\n def process_item(self, item, spider):\n self.add_content(item)\n return item\n\n def add_content(self, item):\n title = item.get('title', '')\n publish_time = item.get('publish_time', '')\n info_sources = item.get('info_sources', '')\n company_name = item.get('company_name', '')\n company_address = item.get('company_address', '')\n sampled_unit = item.get('sampled_unit', '')\n sampled_address = item.get('sampled_address', '')\n sample_name = item.get('sample_name', '')\n specification_type = item.get('specification_type', '')\n trademark = item.get('trademark', '')\n 
production_date = item.get('production_date', '')\n factory_num = item.get('factory_num', '')\n approval_num = item.get('approval_num', '')\n unqualified = item.get('unqualified', '')\n test_value = item.get('test_value', '')\n standard = item.get('standard', '')\n sampling_unit = item.get('sampling_unit', '')\n inspection_institution = item.get('inspection_institution', '')\n source_url = item.get('source_url', '')\n\n sql = \"INSERT INTO unqualified_sampling (\" \\\n \"title, publish_time, info_sources, company_name, company_address, \" \\\n \"sampled_unit, sampled_address, sample_name, specification_type, trademark, \" \\\n \"production_date, factory_num, approval_num, unqualified, test_value, \" \\\n \"standard, sampling_unit, inspection_institution, \" \\\n \"source_url) values ('{title}', '{publish_time}', '{info_sources}', '{company_name}', '{company_address}', \" \\\n \"'{sampled_unit}', '{sampled_address}', '{sample_name}', '{specification_type}', '{trademark}', \" \\\n \"'{production_date}', '{factory_num}', '{approval_num}', '{unqualified}', '{test_value}', \" \\\n \"'{standard}', '{sampling_unit}', '{inspection_institution}', '{source_url}');\".\\\n format(title=title, publish_time=publish_time, info_sources=info_sources, company_name=company_name, company_address=company_address,\n sampled_unit=sampled_unit, sampled_address=sampled_address, sample_name=sample_name, specification_type=specification_type, trademark=trademark,\n production_date=production_date, factory_num=factory_num, approval_num=approval_num,unqualified=unqualified, test_value=test_value,\n standard=standard, sampling_unit=sampling_unit, inspection_institution=inspection_institution, source_url=source_url)\n # str.replace returns a new string rather than modifying sql in place\n sql = sql.replace(\"\\\\\", '/')\n print(sql)\n print(\"Do Insert ...\")\n try:\n self.db.ping(reconnect=True)\n with self.db.cursor() as cursor:\n cursor.execute(sql)\n self.db.commit()\n print(\"Insert Success...\")\n except DBError as e:\n # print(e)\n raise e","repo_name":"TengTengCai/ChinaQualityNews","sub_path":"ChinaQualityNews/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":3808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"}
+{"seq_id":"18389838113","text":"import collections\nimport json\nimport os\nimport random\nimport time\nimport typing\n\nimport cupy as cp\nimport pandas as pd\n\nimport cudf\n\nimport morpheus\nfrom morpheus.io.deserializers import read_file_to_df\nfrom morpheus.stages.inference import inference_stage\n\n\nclass TestDirectories(object):\n\n def __init__(self, cur_file=__file__) -> None:\n self.tests_dir = os.path.dirname(cur_file)\n self.morpheus_root = os.environ.get('MORPHEUS_ROOT', os.path.dirname(self.tests_dir))\n self.data_dir = morpheus.DATA_DIR\n self.models_dir = os.path.join(self.morpheus_root, 'models')\n self.datasets_dir = os.path.join(self.models_dir, 'datasets')\n self.training_data_dir = os.path.join(self.datasets_dir, 'training-data')\n self.validation_data_dir = os.path.join(self.datasets_dir, 'validation-data')\n self.tests_data_dir = os.path.join(self.tests_dir, 'tests_data')\n self.mock_triton_servers_dir = os.path.join(self.tests_dir, 'mock_triton_server')\n\n\nTEST_DIRS = TestDirectories()\n\n\nclass IW(inference_stage.InferenceWorker):\n \"\"\"\n Concrete impl class of `InferenceWorker` for the purposes of testing\n \"\"\"\n\n def calc_output_dims(self, _):\n # Intentionally calling the abc empty method for coverage\n super().calc_output_dims(_)\n return (1, 2)\n\n\nResults = 
collections.namedtuple('Results', ['total_rows', 'diff_rows', 'error_pct'])\n\n\ndef calc_error_val(results_file):\n \"\"\"\n Based on the calc_error_val function in val-utils.sh\n \"\"\"\n with open(results_file) as fh:\n results = json.load(fh)\n\n total_rows = results['total_rows']\n diff_rows = results['diff_rows']\n return Results(total_rows=total_rows, diff_rows=diff_rows, error_pct=(diff_rows / total_rows) * 100)\n\n\ndef write_data_to_kafka(bootstrap_servers: str,\n kafka_topic: str,\n data: typing.List[typing.Union[str, dict]],\n client_id: str = 'morpheus_unittest_writer') -> int:\n \"\"\"\n Writes `data` into a given Kafka topic, emitting one message for each element in `data`. Returns the number of\n messages written\n \"\"\"\n from kafka import KafkaProducer\n num_records = 0\n producer = KafkaProducer(bootstrap_servers=bootstrap_servers, client_id=client_id)\n for row in data:\n if isinstance(row, dict):\n row = json.dumps(row)\n producer.send(kafka_topic, row.encode('utf-8'))\n num_records += 1\n\n producer.flush()\n\n assert num_records > 0\n time.sleep(1)\n\n return num_records\n\n\ndef write_file_to_kafka(bootstrap_servers: str,\n kafka_topic: str,\n input_file: str,\n client_id: str = 'morpheus_unittest_writer') -> int:\n \"\"\"\n Writes data from `input_file` into a given Kafka topic, emitting one message for each line in the file.\n Returns the number of messages written\n \"\"\"\n with open(input_file) as fh:\n data = [line.strip() for line in fh]\n\n return write_data_to_kafka(bootstrap_servers=bootstrap_servers,\n kafka_topic=kafka_topic,\n data=data,\n client_id=client_id)\n\n\ndef compare_class_to_scores(file_name, field_names, class_prefix, score_prefix, threshold):\n df = read_file_to_df(file_name, df_type='pandas')\n for field_name in field_names:\n class_field = f\"{class_prefix}{field_name}\"\n score_field = f\"{score_prefix}{field_name}\"\n above_thresh = df[score_field] > threshold\n\n df[class_field].to_csv(f\"/tmp/class_field_{field_name}.csv\")\n df[score_field].to_csv(f\"/tmp/score_field_vals_{field_name}.csv\")\n above_thresh.to_csv(f\"/tmp/score_field_{field_name}.csv\")\n\n assert all(above_thresh == df[class_field]), f\"Mismatch on {field_name}\"\n\n\ndef extend_df(df, repeat_count) -> pd.DataFrame:\n extended_df = pd.concat([df for _ in range(repeat_count)])\n return extended_df.reset_index(inplace=False, drop=True)\n\n\ndef assert_path_exists(filename: str, retry_count: int = 5, delay_ms: int = 500):\n \"\"\"\n This should be used in place of `assert os.path.exists(filename)` inside of tests. This will automatically retry\n with a delay if the file is not immediately found. 
This removes the need for adding any `time.sleep()` inside of\n tests\n\n Parameters\n ----------\n filename : str\n The path to assert exists\n retry_count : int, optional\n Number of times to check for the file before failing, by default 5\n delay_ms : int, optional\n Milliseconds between trys, by default 500\n\n Returns\n -------\n Returns none but will throw an assertion error on failure.\n \"\"\"\n\n # Quick exit if the file exists\n if (os.path.exists(filename)):\n return\n\n attempts = 1\n\n # Otherwise, delay and retry\n while (attempts <= retry_count):\n time.sleep(delay_ms / 1000.0)\n\n if (os.path.exists(filename)):\n return\n\n attempts += 1\n\n # Finally, actually assert on the final try\n assert os.path.exists(filename)\n\n\ndef duplicate_df_index(df: pd.DataFrame, replace_ids: typing.Dict[int, int]):\n\n # Return a new dataframe where we replace some index values with others\n return df.rename(index=replace_ids)\n\n\ndef duplicate_df_index_rand(df: pd.DataFrame, count=1):\n\n assert count * 2 <= len(df), \"Count must be less than half the number of rows\"\n\n # Sample 2x the count. One for the old ID and one for the new ID. Dont want duplicates so we use random.sample\n # (otherwise you could get less duplicates than requested if two IDs just swap)\n dup_ids = random.sample(df.index.values.tolist(), 2 * count)\n\n # Create a dictionary of old ID to new ID\n replace_dict = {x: y for x, y in zip(dup_ids[:count], dup_ids[count:])}\n\n # Return a new dataframe where we replace some index values with others\n return duplicate_df_index(df, replace_dict)\n\n\ndef assert_df_equal(df_to_check: typing.Union[pd.DataFrame, cudf.DataFrame], val_to_check: typing.Any):\n\n # Comparisons work better in cudf so convert everything to that\n if (isinstance(df_to_check, cudf.DataFrame) or isinstance(df_to_check, cudf.Series)):\n df_to_check = df_to_check.to_pandas()\n\n if (isinstance(val_to_check, cudf.DataFrame) or isinstance(val_to_check, cudf.Series)):\n val_to_check = val_to_check.to_pandas()\n elif (isinstance(val_to_check, cp.ndarray)):\n val_to_check = val_to_check.get()\n\n bool_df = df_to_check == val_to_check\n\n return bool(bool_df.all(axis=None))\n\n\ndef assert_results(results: dict) -> dict:\n \"\"\"\n Receives the results dict from the `CompareDataframeStage.get_results` method,\n and asserts that all columns and rows match\n \"\"\"\n assert results[\"diff_cols\"] == 0, f\"Expected diff_cols=0 : {results}\"\n assert results[\"diff_rows\"] == 0, f\"Expected diff_rows=0 : {results}\"\n return results\n","repo_name":"kasinadhsarma/Morpheus","sub_path":"tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"12"} +{"seq_id":"1389718772","text":"import numpy as np\nimport torch\nfrom einops import repeat, rearrange\nimport open3d as o3d\n\n\n__author__ = '__Girish_Hegde__'\n\n\ndef to_pcd(points, colors=None, normals=None, viz=False, filepath=None, name='Viz'):\n \"\"\" Function to convert points array into o3d.PointCloud\n\n Args:\n points (np.ndarray): [N, 3] - list of xyz of points.\n colors (np.ndarray/List): [N, 3] pcd colors or [r, g, b]. Defaults to None.\n normals (np.ndarray): [N, 3] point normals. Defaults to None.\n viz (bool): show point cloud. Defaults to False.\n filepath (str): save point cloud as. 
Defaults to None.\n name (str): window name.\n\n Returns:\n (o3d.PointCloud): point cloud\n \"\"\"\n vec3 = o3d.utility.Vector3dVector\n pcd = o3d.geometry.PointCloud(vec3(points))\n if normals is not None: pcd.normals = vec3(normals)\n if colors is not None:\n colors = np.array(colors)\n if len(colors.shape) > 1:\n pcd.colors = vec3(colors)\n else:\n pcd.paint_uniform_color(colors)\n if viz: o3d.visualization.draw_geometries([pcd], name)\n if filepath is not None: o3d.io.write_point_cloud(filepath, pcd)\n return pcd\n\n\ndef to_mesh(points, faces, colors=None, viz=False, filepath=None, name='Viz'):\n \"\"\" Function to convert points array into o3d.geometry.TriangleMesh\n\n Args:\n points (np.ndarray): [N, 3] - list of xyz of points.\n faces (np.ndarray): [M, 3] - list of triangle faces of points.\n colors (np.ndarray/List, optional): [N, 3] pcd colors or [r, g, b]. Defaults to None.\n viz (bool, optional): show point cloud. Defaults to False.\n filepath (str, optional): save point cloud as. Defaults to None.\n name (str): window name.\n\n Returns:\n (o3d.geometry.TriangleMesh): mesh\n \"\"\"\n mesh = o3d.geometry.TriangleMesh()\n vec3 = o3d.utility.Vector3dVector\n mesh.vertices = vec3(points)\n mesh.triangles = o3d.utility.Vector3iVector(faces)\n if colors is not None:\n colors = np.array(colors)\n if len(colors.shape) > 1:\n mesh.vertex_colors = vec3(colors)\n else:\n mesh.paint_uniform_color(colors)\n if viz: o3d.visualization.draw_geometries([mesh], name, mesh_show_back_face=True)\n if filepath is not None: o3d.io.write_triangle_mesh(filepath, mesh)\n return mesh\n\n\ndef to_lines(points, edges, colors=None, viz=False, filepath=None, name='Viz'):\n \"\"\" Function to convert points and edges into o3d.geometry.LineSet\n\n Args:\n points (np.ndarray[float]): [N, 3] - list of xyz of points.\n edges (np.ndarray[int]): [M, 2] - list of edges.\n colors (np.ndarray/List, optional): [N, 3] edge colors or [r, g, b].\n viz (bool, optional): show point cloud. Defaults to False.\n filepath (str, optional): save point cloud as. 
Defaults to None.\n name (str): window name.\n\n Returns:\n (o3d.geometry.LineSet): lines\n \"\"\"\n lines = o3d.geometry.LineSet()\n vec3 = o3d.utility.Vector3dVector\n lines.points = vec3(points)\n lines.lines = o3d.utility.Vector2iVector(edges)\n if colors is not None:\n colors = np.array(colors)\n if len(colors.shape) > 1:\n lines.colors = vec3(colors)\n else:\n lines.paint_uniform_color(colors)\n if viz: o3d.visualization.draw_geometries([lines], name, mesh_show_back_face=True)\n if filepath is not None: o3d.io.write_line_set(filepath, lines)\n return lines\n\n\ndef create_octant_planes(span=1):\n \"\"\" Function to create open3d octant seperation planes\n \"\"\"\n # Create points representing the octant separation planes\n xy_plane_points = np.array([[-1, -1, 0], [1, -1, 0], [1, 1, 0], [-1, 1, 0]])*span\n xz_plane_points = np.array([[-1, 0, -1], [1, 0, -1], [1, 0, 1], [-1, 0, 1]])*span\n yz_plane_points = np.array([[0, -1, -1], [0, 1, -1], [0, 1, 1], [0, -1, 1]])*span\n\n # Create triangle mesh for the XY plane\n xy_triangles = np.array([[0, 1, 2], [0, 2, 3]])\n xy_plane_mesh = o3d.geometry.TriangleMesh()\n xy_plane_mesh.vertices = o3d.utility.Vector3dVector(xy_plane_points)\n xy_plane_mesh.triangles = o3d.utility.Vector3iVector(xy_triangles)\n xy_plane_mesh.paint_uniform_color([1, 0, 0])\n\n # Create triangle mesh for the XZ plane\n xz_triangles = np.array([[0, 1, 2], [0, 2, 3]])\n xz_plane_mesh = o3d.geometry.TriangleMesh()\n xz_plane_mesh.vertices = o3d.utility.Vector3dVector(xz_plane_points)\n xz_plane_mesh.triangles = o3d.utility.Vector3iVector(xz_triangles)\n xz_plane_mesh.paint_uniform_color([0, 0, 1])\n\n # Create triangle mesh for the YZ plane\n yz_triangles = np.array([[0, 1, 2], [0, 2, 3]])\n yz_plane_mesh = o3d.geometry.TriangleMesh()\n yz_plane_mesh.vertices = o3d.utility.Vector3dVector(yz_plane_points)\n yz_plane_mesh.triangles = o3d.utility.Vector3iVector(yz_triangles)\n yz_plane_mesh.paint_uniform_color([0, 1, 0])\n\n octants = xy_plane_mesh + xz_plane_mesh + yz_plane_mesh\n return octants\n\n\ndef viz_frustum(grid_pts, eyes, face_clr=(1, 1, 0), line_clr=None):\n vectype = torch if isinstance(grid_pts, torch.Tensor) else np\n frustum_pts = vectype.stack((\n eyes[0, 0],\n grid_pts[0, 0], grid_pts[0, -1], \n grid_pts[-1, -1], grid_pts[-1, 0]\n ))\n\n frustum_lines = np.array([[0, 1], [0, 2], [0, 3], [0, 4]])\n frustum_clrs = np.array([[0., 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]])\n \n if line_clr is not None:\n frustum_clrs = np.array([line_clr for _ in range(4)])\n \n frustum = to_lines(frustum_pts, frustum_lines, frustum_clrs)\n gridpcd = to_pcd(grid_pts.reshape(-1, 3), face_clr, viz=False)\n\n return frustum, gridpcd\n\n\ndef spherical_viz(c2w, center=None, scene=None):\n i, j, k, eyes = c2w[:, :3, :].permute(2, 0, 1)\n nviews = eyes.shape[0]\n center = center if center is not None else eyes.mean(0)\n\n scale = 0.25*np.linalg.norm(eyes[0] - eyes[1])\n pts = np.vstack([eyes, eyes + scale*i])\n lines = np.hstack([np.arange(nviews)[:, None], (np.arange(nviews) + nviews)[:, None]])\n ilns = to_lines(pts, lines, (1, 0, 0))\n\n pts = np.vstack([eyes, eyes + scale*j])\n jlns = to_lines(pts, lines, (0, 1, 0))\n\n pts = np.vstack([eyes, eyes + scale*k])\n klns = to_lines(pts, lines, (0, 0, 1))\n\n obj = to_pcd(center[None, ...], (0, 0, 0))\n pcd = to_pcd(eyes, (0, 0, 0), )\n\n vizobjs = [obj, pcd, ilns, jlns, klns]\n if scene is not None:\n vizobjs = vizobjs + scene\n o3d.visualization.draw_geometries(vizobjs, mesh_show_back_face=True)\n \n return 
vizobjs\n","repo_name":"girishdhegde/NN3D","sub_path":"nerf/viz_utils.py","file_name":"viz_utils.py","file_ext":"py","file_size_in_byte":6558,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"39246397953","text":"# JST Transcript Reader\n\nimport re\nimport os\nimport pytesseract\nfrom PIL import Image\nimport glob\nimport multiprocessing as mp\nimport time\nfrom pdf2image import convert_from_path, convert_from_bytes\nfrom pdf2image.exceptions import PDFSyntaxError, PDFPageCountError, PDFInfoNotInstalledError\n\nfrom pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter\nfrom pdfminer.converter import TextConverter\nfrom pdfminer.layout import LAParams\nfrom pdfminer.pdfpage import PDFPage\nfrom io import StringIO\n\n\nclass JSTReader:\n\n def __init__(self, directory):\n self.dir = directory # base directory, where JST is uploaded/stored\n self.idir = self.dir + 'images/' # image directory\n pass\n\n def clear_dir(self, mkdir):\n # Empty the working directory\n # Currently done by deleting then (if desired) making it again\n os.system('rm -rf ' + self.dir)\n if mkdir:\n os.system('mkdir ' + self.dir)\n\n def convert_to_image(self, file):\n # new conversion function using pdf2image\n filedir = self.dir + file\n # dpi=300 seems to be the sweet spot\n # using mp.cpu_count() for thread count allows us to multithread without using too many threads\n images = convert_from_path(filedir, dpi=300, thread_count=mp.cpu_count())\n\n index = 0\n for image in images:\n image.save(self.idir + \"image-\" + str(index) + \".jpg\")\n index += 1\n\n def convert_to_text(self, file):\n # convert newly generated image files to text files using pytesseract\n end = len(file) - 4\n filename = file[0:end] + '.txt'\n filestring = pytesseract.image_to_string((Image.open(file)))\n outputfile = open(filename, 'w')\n outputfile.write(filestring)\n outputfile.close()\n\n def scan_file(self, image_based):\n # scan files for course codes\n # uses regex to search text files\n # uses multiprocessing where possible\n accepted_courses = set()\n rejected_courses = set()\n\n # if pdf was image-based, grab a list of images from the image directory\n if image_based:\n image_glob = self.idir + '*.jpg'\n image_list = glob.glob(image_glob)\n image_list.sort()\n\n pool = mp.Pool(mp.cpu_count())\n\n pool.map(self.convert_to_text, [fn for fn in image_list])\n\n file_list = glob.glob(self.idir + '*.txt')\n # print(\"-----------------list of text files-----------------------\")\n # print(file_list)\n # print(os.system('ls ' + self.idir))\n file_list.sort()\n\n flag = True\n add = None\n prev = None\n for filename in file_list:\n if flag:\n for line in open(filename):\n if \"Military Experience\" in line or \"Other Learning Experiences\" in line:\n flag = False\n break\n if line == \"\":\n continue\n if line != \"Military Experience\":\n if re.search(r'((MC|NV|AR|CG|DD)(-|—)[0-9\\s]+-[0-9\\s]+)', line):\n temp = re.findall(r'((MC|NV|AR|CG|DD)(-|—)[0-9\\s]+-[0-9\\s]+)', line)\n # print('---------------------------regex result--------------------')\n # print(temp)\n add = temp[0][0]\n add = add.replace(\" \", \"\")\n add = add.replace(\"—\", \"-\")\n # print('-----------------------course found: -------------------------')\n # print(add)\n if add is not None:\n accepted_courses.add(add)\n prev = add\n add = None\n if \"Credit Is Not Recommended\" in line:\n # print('-------------------------line-----------------------')\n # print(line)\n # print(prev)\n # 
print(accepted_courses)\n if prev is not None:\n if prev in accepted_courses:\n accepted_courses.remove(prev)\n rejected_courses.add(prev)\n prev = None\n # print(accepted_courses)\n return accepted_courses, rejected_courses\n\n def convert_pdf_to_txt(self, path):\n # convert text-based pdf to text file\n rsrcmgr = PDFResourceManager()\n retstr = StringIO()\n codec = 'utf-8'\n laparams = LAParams()\n device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)\n fp = open(path, 'rb')\n interpreter = PDFPageInterpreter(rsrcmgr, device)\n password = \"\"\n maxpages = 0\n caching = True\n pagenos = set()\n\n for page in PDFPage.get_pages(fp, pagenos, maxpages=maxpages, password=password,caching=caching, check_extractable=True):\n interpreter.process_page(page)\n\n text = retstr.getvalue()\n\n fp.close()\n device.close()\n retstr.close()\n return text\n\n def scan_pdf(self):\n accepted_courses = set()\n rejected_courses = set()\n start_time = time.time()\n # get the list of files from the current directory\n files = os.listdir(self.dir)\n\n # filter the list of files for only pdf files\n files = [fi for fi in files if fi.endswith(\".pdf\")]\n\n os.system('mkdir ' + self.idir)\n # print('----------------------------idir has been created---------------------')\n # print(os.getcwd())\n\n # for each pdf\n for pdf in files:\n filestring = self.convert_pdf_to_txt(self.dir + pdf).strip().split(\"\\n\")\n # print('---------------------------filestring--------------------------------')\n # print(filestring)\n if len(filestring[0]) > 0:\n owd = os.getcwd()\n os.chdir(self.idir)\n with open('jst.txt', 'w') as outfile:\n for item in filestring:\n outfile.write(\"%s\\n\" % item)\n os.chdir(owd)\n\n accepted_courses, rejected_courses = self.scan_file(False)\n\n else: # OCR route\n print('------------------image-based PDF, running conversion------------------------')\n conv_start = time.time()\n self.convert_to_image(pdf)\n conv_end = time.time()\n conv_time = conv_end - conv_start\n print('------------- Conversion finished, run time: ', conv_time, '------------------')\n accepted_courses, rejected_courses = self.scan_file(True)\n\n course_dict = {}\n course_dict['accepted'] = sorted(list(accepted_courses))\n course_dict['rejected'] = sorted(list(rejected_courses))\n end_time = time.time()\n runtime = end_time - start_time\n print('-------------------------------- total run time: ', runtime, '---------------------------------')\n return course_dict\n","repo_name":"OlivetACM/MCE-Project-for-MVAA-Frontend","sub_path":"www/home/JSTReader.py","file_name":"JSTReader.py","file_ext":"py","file_size_in_byte":7208,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"12"} +{"seq_id":"17718395204","text":"import json, config, requests, time, hashlib, hmac, urllib.request, urllib.parse\nfrom flask import Flask, request, jsonify, render_template\nfrom binance.client import Client\nfrom binance.enums import *\n\napp = Flask(__name__)\n\nclient = Client(config.API_KEY, config.API_SECRET, tld='us')\nclientTestNet = Client(config.API_KEY_TEST, config.API_SECRET_TEST, tld='us')\n\ndef order(side, quantity, symbol, order_type=ORDER_TYPE_MARKET):\n try:\n print(f\"sending order {order_type} - {side} {quantity} {symbol}\")\n # order = client.create_order(symbol=symbol, side=side, type=order_type, quantity=quantity)\n url = \"https://testnet.binance.vision/api/v3/order\"\n headers = {\"Content-Type\":\"application/json\", \"X-MBX-APIKEY\":config.API_KEY_TEST}\n params = {\n 
\"symbol\": symbol,\n \"side\": side,\n \"type\": order_type,\n \"quantity\": quantity,\n \"timestamp\": int(time.time() * 1000)\n }\n\n secret = bytes(config.API_SECRET_TEST.encode('utf-8'))\n signature = hmac.new(secret, urllib.parse.urlencode(params).encode('utf-8'), hashlib.sha256).hexdigest()\n\n params['signature'] = signature\n\n order = requests.post(url= url, headers= headers, params= params)\n\n print(order.content)\n\n except Exception as e:\n print(\"an exception occurred - {}\".format(e))\n return False\n\n return order\n\n@app.route('/')\ndef welcome():\n return render_template('index.html')\n\n@app.route('/webhook', methods=['POST'])\ndef webhook():\n #print(request.data)\n data = json.loads(request.data)\n \n if data['passphrase'] != config.WEBHOOK_PASSPHRASE:\n return {\n \"code\": \"error\",\n \"message\": \"Nice try, invalid passphrase\"\n }\n\n side = data['strategy']['order_action'].upper()\n quantity = data['strategy']['order_contracts']\n order_response = order(side, quantity, \"BTCUSDT\")\n\n if order_response:\n return {\n \"code\": \"success\",\n \"message\": \"order executed\"\n }\n else:\n print(\"order failed\")\n\n return {\n \"code\": \"error\",\n \"message\": \"order failed\"\n }","repo_name":"HoiYee97/tradingview-binance-strategy-alert-webhook","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"40569100717","text":"# IOI\nN=int(input())\nlength = int(input())\nstring = input().split('I')\nsol=0\ntmp=0\nfor i in range(len(string)) :\n \n if string[i] == 'O' and i!=len(string)-1:\n \n tmp+=1\n \n \n \n elif i==len(string)-1 or string[i] !='O':\n count = tmp-N+1\n if count > 0:\n sol+=count\n tmp=0\n \nprint(sol)\n\n","repo_name":"gudals113/Algorithms","sub_path":"boj/String/acmicpc-5525.py","file_name":"acmicpc-5525.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"71646360022","text":"__copyright__ = \"\"\"\nCopyright (C) 2005, Catalin Marinas \n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License version 2 as\npublished by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, see http://www.gnu.org/licenses/.\n\"\"\"\n\nfrom stgit.argparse import opt\nfrom stgit.commands import common\nfrom stgit.lib import transaction\nfrom stgit.out import *\nfrom stgit import argparse\n\nhelp = 'Permanently store the applied patches into the stack base'\nkind = 'stack'\nusage = ['',\n '[--] ',\n '-n NUM',\n '--all']\ndescription = \"\"\"\nMerge one or more patches into the base of the current stack and\nremove them from the series while advancing the base. This is the\nopposite of 'stg uncommit'. Use this command if you no longer want to\nmanage a patch with StGIT.\n\nBy default, the bottommost patch is committed. 
If patch names are\ngiven, the stack is rearranged so that those patches are at the\nbottom, and then they are committed.\n\nThe -n/--number option specifies the number of applied patches to\ncommit (counting from the bottom of the stack). If -a/--all is given,\nall applied patches are committed.\"\"\"\n\nargs = [argparse.patch_range(argparse.applied_patches,\n argparse.unapplied_patches)]\noptions = [\n opt('-n', '--number', type = 'int',\n short = 'Commit the specified number of patches'),\n opt('-a', '--all', action = 'store_true',\n short = 'Commit all applied patches')]\n\ndirectory = common.DirectoryHasRepositoryLib()\n\ndef func(parser, options, args):\n \"\"\"Commit a number of patches.\"\"\"\n stack = directory.repository.current_stack\n args = common.parse_patches(args, list(stack.patchorder.all_visible))\n if len([x for x in [args, options.number != None, options.all] if x]) > 1:\n parser.error('too many options')\n if args:\n patches = [pn for pn in stack.patchorder.all_visible if pn in args]\n bad = set(args) - set(patches)\n if bad:\n raise common.CmdException('Nonexistent or hidden patch names: %s'\n % ', '.join(sorted(bad)))\n elif options.number != None:\n if options.number <= len(stack.patchorder.applied):\n patches = stack.patchorder.applied[:options.number]\n else:\n raise common.CmdException('There are not that many applied patches')\n elif options.all:\n patches = stack.patchorder.applied\n else:\n patches = stack.patchorder.applied[:1]\n if not patches:\n raise common.CmdException('No patches to commit')\n\n iw = stack.repository.default_iw\n def allow_conflicts(trans):\n # As long as the topmost patch stays where it is, it's OK to\n # run \"stg commit\" with conflicts in the index.\n return len(trans.applied) >= 1\n trans = transaction.StackTransaction(stack, 'commit',\n allow_conflicts = allow_conflicts)\n try:\n common_prefix = 0\n for i in xrange(min(len(stack.patchorder.applied), len(patches))):\n if stack.patchorder.applied[i] == patches[i]:\n common_prefix += 1\n else:\n break\n if common_prefix < len(patches):\n to_push = [pn for pn in stack.patchorder.applied[common_prefix:]\n if pn not in patches[common_prefix:]]\n # this pops all the applied patches from common_prefix\n trans.pop_patches(lambda pn: pn in to_push)\n for pn in patches[common_prefix:]:\n trans.push_patch(pn, iw)\n else:\n to_push = []\n new_base = trans.patches[patches[-1]]\n for pn in patches:\n trans.patches[pn] = None\n trans.applied = [pn for pn in trans.applied if pn not in patches]\n trans.base = new_base\n out.info('Committed %d patch%s' % (len(patches),\n ['es', ''][len(patches) == 1]))\n for pn in to_push:\n trans.push_patch(pn, iw)\n except transaction.TransactionHalted:\n pass\n return trans.run(iw)\n","repo_name":"dkagedal/stgit","sub_path":"stgit/commands/commit.py","file_name":"commit.py","file_ext":"py","file_size_in_byte":4486,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"12"} +{"seq_id":"29610242378","text":"import json\nfrom urllib import request\n\nfrom selenium import webdriver\nimport pytest\n\nfrom selenium.webdriver.chrome.service import Service\nfrom webdriver_manager.chrome import ChromeDriverManager\n\n\ndef read_jsonfile(path):\n with open(path) as input: # Loading testdata\n Input_Object = json.load(input)\n input.close()\n return Input_Object\n\n\n@pytest.fixture(scope=\"class\")\ndef browser_setup(request):\n driver = webdriver.Chrome()\n driver.implicitly_wait(5)\n driver.maximize_window()\n request.instance.driver 
= driver\n yield\n driver.close()\n driver.quit()","repo_name":"Guna1388/automation","sub_path":"Python_automation_framework/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"}
+{"seq_id":"817327034","text":"A_spisok=input()\nB_spisok=input()\nap=0\nbp=0\na=A_spisok.split(' ')\nb=B_spisok.split(' ')\nfor i in range(3):\n a[i]=int(a[i])\n b[i]=int(b[i])\n if a[i]>b[i]:\n ap+=1\n elif a[i]<b[i]:\n bp+=1\n print('tweepyStreamApp.py -m <market_analysis> -f <frequency> -u <user_id> -e <env>')\n sys.exit(2)\n market_analysis = None\n frequency = None\n user_id = None\n env = None\n for opt, arg in opts:\n if opt == '-h':\n print('tweepyStreamApp.py -m <market_analysis> -f <frequency> -u <user_id> -e <env>')\n sys.exit()\n elif opt in (\"-m\", \"--market_analysis\"):\n market_analysis = arg\n elif opt in (\"-f\", \"--frequency\"):\n frequency = arg\n elif opt in (\"-u\", \"--user_id\"):\n user_id = arg\n elif opt in (\"-e\", \"--env\"):\n env = arg\n\n payload = {'process': 'stock_quotes',\n 'market_analysis': market_analysis,\n 'env': env,\n 'user_id': user_id}\n\n batch_url = config_vars.BATCH_URL\n batch_request_url = batch_url + 'start_stock_quote'\n return batch_request_url, payload, frequency\n\n\nif __name__ == \"__main__\":\n batch_details = get_batch_request_details(sys.argv[1:])\n request_url = batch_details[0]\n request_payload = batch_details[1]\n\n\n def request_batch(url=request_url, payload=request_payload):\n requests.post(url, json=payload)\n\n schedule.every().monday.at(\"16:01\").do(request_batch)\n schedule.every().tuesday.at(\"16:01\").do(request_batch)\n schedule.every().wednesday.at(\"16:01\").do(request_batch)\n schedule.every().thursday.at(\"16:01\").do(request_batch)\n schedule.every().friday.at(\"16:01\").do(request_batch)\n\n while True:\n schedule.run_pending()\n time.sleep(1)\n","repo_name":"MSnowman/social-analytics","sub_path":"schedulerapp/main/scheduler/stock_quote_scheduler.py","file_name":"stock_quote_scheduler.py","file_ext":"py","file_size_in_byte":2480,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"}
+{"seq_id":"6447688116","text":"# https://developer.amazon.com/public/solutions/alexa/alexa-skills-kit/docs/steps-to-create-a-smart-home-skill\n# https://developer.amazon.com/public/solutions/alexa/alexa-skills-kit/docs/smart-home-skill-api-reference\nimport os\nimport sys\nfrom argparse import Namespace\n\n\n\n\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nimport ssl\nimport json\nimport uuid\nfrom datetime import datetime\nimport colorsys\n\nfrom . 
import p3_tools as p3tools\n\n \nALEXA_Range_Controller={}\n\nDEFAULT_RANGE = (0, 100)\nDEFAULT_RANGE_LOGIC = (True, False)\n\n\n## Start - Definitions for global used Utterances from the global catalog\nALEXA_Range_Controller[\"capabilityResources\"] = {\"friendlyNames\":[{\"@type\":\"asset\",\"value\":{\"assetId\":\"Alexa.Setting.Opening\"}}]}\nALEXA_Range_Controller[\"configuration\"] = {\"supportedRange\":{\"minimumValue\":0,\"maximumValue\":100,\"precision\":1},\"unitOfMeasure\":\"Alexa.Unit.Percent\"}\nALEXA_Range_Controller[\"semantics\"] = {\"actionMappings\":[{\"@type\":\"ActionsToDirective\",\"actions\":[\"Alexa.Actions.Close\"],\"directive\":{\"name\":\"SetRangeValue\",\"payload\":{\"rangeValue\":100}}},{\"@type\":\"ActionsToDirective\",\"actions\":[\"Alexa.Actions.Open\"],\"directive\":{\"name\":\"SetRangeValue\",\"payload\":{\"rangeValue\":0}}},{\"@type\":\"ActionsToDirective\",\"actions\":[\"Alexa.Actions.Lower\"],\"directive\":{\"name\":\"AdjustRangeValue\",\"payload\":{\"rangeValueDelta\":999,\"rangeValueDeltaDefault\":False}}},{\"@type\":\"ActionsToDirective\",\"actions\":[\"Alexa.Actions.Raise\"],\"directive\":{\"name\":\"AdjustRangeValue\",\"payload\":{\"rangeValueDelta\":-999,\"rangeValueDeltaDefault\":False}}}],\"stateMappings\":[{\"@type\":\"StatesToValue\",\"states\":[\"Alexa.States.Closed\"],\"value\":0},{\"@type\":\"StatesToRange\",\"states\":[\"Alexa.States.Open\"],\"range\":{\"minimumValue\":1,\"maximumValue\":100}}]}\n\n## End - Definitions for global used Utterances from the global catalog\n \n\n\ndef what_percentage(value, range):\n _min, _max = range\n return ( (value - _min) / (_max - _min) ) * 100\n\ndef percent_2_kelvin(value, device_range):\n _minDevice, _maxDevice = device_range\n range2Set = _maxDevice - _minDevice\n \n kelvin2Add = (value/100.0*range2Set)\n \n value_new = round((kelvin2Add+_minDevice)/10)*10\n if value_new > _maxDevice:\n value_new = _maxDevice\n if value_new < _minDevice:\n value_new = _minDevice\n \n \n return value_new\n\nclass AlexaService(object):\n def __init__(self, Proto, logger, version, devices, actions, host, port, auth=None, https_certfile=None, https_keyfile=None):\n self.logger = logger\n self.version = version\n self.devices = devices\n self.actions = actions\n self._proto = Proto\n self._protocol = Proto\n self.logger.info(\"Alexa: service setup at {}:{}\".format(host, port))\n\n handler_factory = lambda *args: AlexaRequestHandler(self._proto,logger, version, devices, actions, *args)\n self.server = HTTPServer((host, port), handler_factory)\n\n if https_certfile: # https://www.piware.de/2011/01/creating-an-https-server-in-python/\n self.logger.info(\"Alexa: enabling SSL/TLS support with cert-file {} & key-file {}\".format(https_certfile, https_keyfile))\n # TODO: client-certificates can be handled here as well: https://docs.python.org/2/library/ssl.html\n self.server.socket = ssl.wrap_socket(self.server.socket, server_side=True, certfile=https_certfile, keyfile=https_keyfile)\n\n def start(self):\n self.logger.info(\"Alexa: service starting\")\n self._proto.addEntry('INFO ', \"Alexa - service starting\")\n self.server.serve_forever()\n \n\n def stop(self):\n self.logger.info(\"Alexa: service stopping\")\n self.server.shutdown()\n \n \n\nclass AlexaRequestHandler(BaseHTTPRequestHandler):\n def __init__(self,Proto, logger, version, devices, actions, *args):\n self.logger = logger\n self.version = version\n self.devices = devices\n self.actions = actions\n self._proto = Proto\n BaseHTTPRequestHandler.__init__(self, 
*args)\n \n \n\n \n \n \n # find Value for Key in Json-structure\n # needed for Alexa Payload V3\n \n def search(self,p, strsearch):\n if type(p) is dict: \n if strsearch in p:\n tokenvalue = p[strsearch]\n if not tokenvalue is None:\n return tokenvalue\n else:\n for i in p:\n tokenvalue = self.search(p[i], strsearch) \n if not tokenvalue is None:\n return tokenvalue \n def replace(self,p, strsearch, newValue):\n if type(p) is dict: \n if strsearch in p:\n tokenvalue = p[strsearch]\n p[strsearch] = newValue\n if not tokenvalue is None:\n return tokenvalue\n else:\n for i in p:\n tokenvalue = self.replace(p[i], strsearch,newValue) \n if not tokenvalue is None:\n return tokenvalue\n \n def GenerateThermoList(self, myModes, listType):\n mylist = myModes.split(' ')\n myValueList = {}\n myModeList = {}\n for i in mylist:\n key=i.split(':')\n myValueList[key[0]]=key[1]\n myModeList[key[1]] = key[0]\n if listType == 1:\n return myValueList\n elif listType == 2:\n return myModeList \n def do_POST(self):\n self.logger.debug(\"{} {} {}\".format(self.request_version, self.command, self.path))\n try:\n length = int(self.headers.get('Content-Length'))\n data = self.rfile.read(length).decode('utf-8')\n req = json.loads(data)\n #======================================\n # Test Payloadversion\n #======================================\n payloadVersion = self.search( req,'payloadVersion')\n #======================================\n # PayloadVersion = 2 -> Standard Handling\n #======================================\n \n if payloadVersion == '2':\n \n self.logger.debug(\"Alexa: received Request with payload : 2\") \n header = req['header']\n payload = req['payload']\n \n if header['namespace'] == 'Alexa.ConnectedHome.System':\n return self.handle_system(header, payload)\n \n elif header['namespace'] == 'Alexa.ConnectedHome.Discovery':\n return self.handle_discovery(header, payload)\n \n elif header['namespace'] == 'Alexa.ConnectedHome.Control':\n return self.handle_control(header, payload)\n \n else:\n msg = \"unknown `header.namespace` '{}'\".format(header['namespace'])\n self.logger.error(msg)\n self.send_error(400, explain=msg)\n #======================================\n # PayloadVersion = 3 -> new Handling\n #======================================\n elif payloadVersion == '3':\n self.logger.debug(\"Alexa: received Request with payload : 3\")\n mydirective = self.search( req,'directive')\n header = self.search( req,'header')\n payload = self.search( req,'payload')\n if header['namespace'] == 'Alexa.Discovery':\n return self.p3_handle_discovery(header, payload)\n elif header['namespace'] == 'Alexa':\n return self.p3_handle_control(header, payload,mydirective)\n \n elif mydirective != None:\n return self.p3_handle_control(header, payload,mydirective)\n else:\n msg = \"unknown `header.namespace` '{}'\".format(header['namespace'])\n self.logger.error(msg)\n self.send_error(400, explain=msg)\n else:\n self.send_error(500,\"Request with unknown Payload '{}'\".format(payloadVersion))\n except Exception as e:\n self.send_error(500, explain=str(e))\n \n def respond(self, response):\n data = json.dumps(response).encode('utf-8')\n self.send_response(200)\n self.send_header('Content-Type', 'application/json')\n self.send_header('Content-Length', len(data))\n self.end_headers()\n self.wfile.write(data)\n #========================================================\n # A.Kohler 22.06.2018\n #========================================================\n \n def handle_system(self, header, payload):\n directive = header['name']\n 
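# Payload-v2 system namespace: 'HealthCheckRequest' is the only directive handled here; anything else falls through to the HTTP 400 error below.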
self.logger.debug(\"Alexa: system-directive '{}' received\".format(directive))\n\n if directive == 'HealthCheckRequest':\n self.respond(self.confirm_health(payload))\n else:\n msg = \"unknown `header.name` '{}'\".format(directive)\n self.logger.error(msg)\n self.send_error(400, explain=msg)\n\n def confirm_health(self, payload):\n requested_on = payload['initiationTimestamp']\n self.logger.debug(\"Alexa: confirming health as requested on {}\".format(requested_on))\n return {\n 'header': self.header('HealthCheckResponse', 'Alexa.ConnectedHome.System'),\n 'payload': {\n 'description': 'The system is currently healthy',\n 'isHealthy': True\n }}\n \n def handle_discovery(self, header, payload):\n directive = header['name']\n self.logger.debug(\"AlexaP3: discovery-directive '{}' received\".format(directive))\n\n if directive == 'DiscoverAppliancesRequest':\n #myResponse = self.discover_appliances()\n self.respond(self.discover_appliances())\n else:\n msg = \"unknown `header.name` '{}'\".format(directive)\n self.logger.error(msg)\n self.send_error(400, explain=msg)\n \n \n # Handling für Payload V3 Discovery\n \n def p3_handle_discovery(self, header, payload):\n directive = header['name']\n self.logger.debug(\"AlexaP3: discovery-directive '{}' received\".format(directive))\n self._proto.addEntry('INFO ', \"AlexaP3: discovery-directive '{}' received\".format(directive))\n\n if directive == 'Discover':\n self.respond(self.p3_discover_appliances())\n else:\n msg = \"unknown `header.name` '{}'\".format(directive)\n self.logger.error(msg)\n self._proto.addEntry('ERROR ', msg)\n self.send_error(400, explain=msg)\n\n def p3_discover_appliances(self):\n discovered = []\n for device in self.devices.all():\n mycapabilities = []\n\n newcapa = {\"type\": \"AlexaInterface\",\n \"interface\": \"Alexa\",\n \"version\": \"3\"\n }\n mycapabilities.append(newcapa)\n \n # Standard capability for Connectivity\n newcapa = {\"type\": \"AlexaInterface\",\n \"interface\": \"Alexa.EndpointHealth\",\n \"version\": \"3\",\n \"properties\" : {\n \"supported\": [\n {\n \"name\": \"connectivity\"\n }\n ],\n \"proactivelyReported\": False,\n \"retrievable\": True\n }\n }\n mycapabilities.append(newcapa)\n \n # Start - Check Namespaces for Actions\n myNameSpace = {}\n myItems = device.backed_items()\n for Item in myItems:\n # Get all Actions for this item\n action_names = list( map(str.strip, Item.conf['alexa_actions'].split(' ')) )\n # über alle Actions für dieses item\n for myActionName in action_names:\n myAction = self.actions.by_name(myActionName)\n if myAction.namespace not in str(myNameSpace):\n myNameSpace[myAction.namespace] = myAction.response_type\n \n for NameSpace in myNameSpace:\n print (NameSpace, 'correspondend to ', myNameSpace[NameSpace])\n # End - Check Namespaces\n \n \n \n if len(myNameSpace) != 0: \n for NameSpace in myNameSpace:\n\n newcapa = {}\n newcapa = {\n \"type\": \"AlexaInterface\",\n \"interface\": NameSpace,\n \"version\": \"3\",\n \"properties\": {\n \"supported\": [\n {\n \"name\": myNameSpace[NameSpace]\n }\n ],\n \"proactivelyReported\": device.proactivelyReported,\n \"retrievable\": device.retrievable\n }\n }\n # Check of special NameSpace\n \n if NameSpace == 'Alexa.ThermostatController':\n AlexaItem = self.devices.get(device.id)\n myModeList = self.GenerateThermoList(AlexaItem.thermo_config, 2)\n myModes = []\n for mode in myModeList:\n myModes.append(mode)\n mysupported = {\n \"supportsScheduling\": False,\n \"supportedModes\":\n myModes\n }\n newcapa['properties']['configuration'] = 
mysupported\n mysupported=[\n {\"name\" : 'thermostatMode'},\n {\"name\" : 'targetSetpoint'}\n ]\n newcapa['properties']['supported'] = mysupported\n \n\n \n if NameSpace == 'Alexa.SceneController':\n newcapa={\"type\": \"AlexaInterface\",\n \"interface\": NameSpace,\n \"version\": \"3\",\n \"supportsDeactivation\" : False\n } \n if NameSpace == 'Alexa.CameraStreamController':\n AlexaItem = self.devices.get(device.id)\n\n myStreams = p3tools.CreateStreamSettings(AlexaItem)\n newcapa={\"type\": \"AlexaInterface\",\n \"interface\": NameSpace,\n \"version\": \"3\",\n \"cameraStreamConfigurations\" : myStreams\n } \n if NameSpace == 'Alexa.PlaybackController':\n AlexaItem = self.devices.get(device.id)\n myModeList= AlexaItem.supported_actions()\n myModes = []\n for mode in myModeList:\n if mode in \"Play Stop FastForward Next Pause Previous Rewind StartOver\":\n myModes.append(mode)\n newcapa={\"type\": \"AlexaInterface\",\n \"interface\": \"Alexa.PlaybackController\",\n \"version\": \"3\",\n \"supportedOperations\" : myModes\n }\n \n \n if NameSpace == 'Alexa.RangeController':\n try:\n if hasattr(device, \"alexa_range_delta\"):\n alexa_range_delta = device.alexa_range_delta\n else:\n alexa_range_delta = 20 # default\n \n myConfig = json.dumps(ALEXA_Range_Controller[\"semantics\"])\n myConfig = myConfig.replace(\"-999\",str(int(alexa_range_delta)*-1))\n myConfig = myConfig.replace(\"999\",str(int(alexa_range_delta)))\n myConfig = json.loads(myConfig)\n \n if (\"EXTERIOR_BLIND\" in device.icon or \"INTERIOR_BLIND\" in device.icon):\n newcapa[\"instance\"] = device.id\n newcapa[\"capabilityResources\"] = ALEXA_Range_Controller[\"capabilityResources\"]\n newcapa[\"configuration\"] = ALEXA_Range_Controller[\"configuration\"]\n newcapa[\"semantics\"] = myConfig\n \n except Exception as e:\n pass\n\n \n # End Check special Namespace\n mycapabilities.append(newcapa)\n if device.icon == None:\n device.icon = [\"SWITCH\"]\n \n \n \n appliance = {\n \"endpointId\": device.id,\n \"friendlyName\": device.name,\n \"description\": device.description + \" by SmartHomeNG\",\n \"manufacturerName\": \"SmarthomeNG\",\n \"displayCategories\": \n device.icon,\n \"cookie\": {\n 'extraDetail{}'.format(idx+1) : item.id() for idx, item in enumerate(device.backed_items())\n },\n \"capabilities\" : \n mycapabilities\n }\n discovered.append(appliance)\n \n \n return {\n \"event\": {\n \"header\": {\n \"namespace\": \"Alexa.Discovery\",\n \"name\": \"Discover.Response\",\n \"payloadVersion\": \"3\",\n \"messageId\": uuid.uuid4().hex\n },\n \"payload\": {\n \"endpoints\": \n discovered\n }\n }\n }\n \n\n # https://developer.amazon.com/public/solutions/alexa/alexa-skills-kit/docs/smart-home-skill-api-reference#discovery-messages\n \n def discover_appliances(self):\n discovered = []\n for device in self.devices.all():\n appliance = {\n 'actions': device.supported_actions(),\n 'additionalApplianceDetails': {\n 'item{}'.format(idx+1) : item.id() for idx, item in enumerate(device.backed_items())\n },\n 'applianceId': device.id,\n 'friendlyDescription': device.description,\n 'friendlyName': device.name,\n 'isReachable': True,\n 'manufacturerName': 'SmartHomeNG',\n 'modelName': 'SmartHomeNG',\n 'version': self.version\n }\n if device.types:\n appliance['applianceTypes'] = device.types\n discovered.append(appliance)\n\n return {\n 'header': self.header('DiscoverAppliancesResponse', 'Alexa.ConnectedHome.Discovery'),\n 'payload': {\n 'discoveredAppliances': discovered\n }\n }\n # ================================================\n # 
Resportstate for all devices\n # ================================================\n \n def p3_ReportState(self, directive):\n now = datetime.now().isoformat()\n myTimeStamp = now[0:22]+'Z'\n device_id = directive['endpoint']['endpointId']\n \n AlexaItem = self.devices.get(device_id)\n myItems = AlexaItem.backed_items()\n self._proto.addEntry('INFO ', \"received ReportState for '{}' \".format(device_id))\n Properties = []\n differentNameSpace = ''\n myValue = None \n alreadyReportedControllers = []\n # walk over all Items examp..: Item: OG.Flur.Spots.dimmen / Item: OG.Flur.Spots\n for Item in myItems:\n msg = \"\"\n # Get all Actions for this item\n action_names = list( map(str.strip, Item.conf['alexa_actions'].split(' ')) )\n # über alle Actions für dieses item\n for myActionName in action_names:\n try:\n myAction = self.actions.by_name(myActionName)\n differentNameSpace = ''\n # all informations colletec (Namespace, ResponseTyp, .....\n # check if capabilitie is alredy in\n self.logger.debug(\"Alexa: ReportState for {}\".format(Item._name))\n if myAction.response_type not in str(alreadyReportedControllers):\n #if myAction.namespace not in str(alreadyReportedControllers):\n #if myAction.response_type not in str(Properties):\n Propertyname = myAction.response_type\n if myAction.namespace == \"Alexa.PowerController\":\n msg=\"calculating Value for Controller'{}' - Item : {} \".format(Item.property.name,myAction.namespace)\n myValue=Item()\n if myValue == 0:\n myValue = 'OFF'\n elif myValue == 1:\n myValue = 'ON'\n \n elif myAction.namespace == \"Alexa.RangeController\":\n msg=\"calculating Value for Controller'{}' - Item : {} \".format(Item.property.name,myAction.namespace)\n item_range = Item.alexa_range\n item_now = Item()\n myValue = int(what_percentage(item_now, item_range))\n \n elif myAction.namespace == \"Alexa.ColorTemperatureController\":\n msg=\"calculating Value for Controller'{}' - Item : {} \".format(Item.property.name,myAction.namespace)\n item_now = Item()\n myPercentage = int(what_percentage(item_now, [0,255]))\n myValue = percent_2_kelvin(myPercentage,Item.alexa_range)\n \n elif myAction.namespace == \"Alexa.BrightnessController\":\n msg=\"calculating Value for Controller'{}' - Item : {} \".format(Item.property.name,myAction.namespace)\n item_range = Item.alexa_range\n item_now = Item()\n myValue = int(what_percentage(item_now, item_range))\n \n elif myAction.namespace == \"Alexa.PowerLevelController\":\n msg=\"calculating Value for Controller'{}' - Item : {} \".format(Item.property.name,myAction.namespace)\n item_range = Item.alexa_range\n item_now = Item()\n myValue = int(what_percentage(item_now, item_range))\n \n elif myAction.namespace == \"Alexa.LockController\" and myAction.name != 'ReportLockState':\n continue\n \n elif myAction.namespace == \"Alexa.LockController\" and myAction.name == 'ReportLockState':\n msg=\"calculating Value for Controller'{}' - Item : {} \".format(Item.property.name,myAction.namespace)\n item_new = Item()\n if item_new == 0:\n myValue = 'UNLOCKED'\n elif item_new == 1:\n myValue = 'LOCKED'\n elif item_new == 254:\n myValue = 'JAMMED'\n else:\n myValue = 'JAMMED' # no known value -> blocked\n \n \n elif myAction.namespace == \"Alexa.PercentageController\":\n msg=\"calculating Value for Controller'{}' - Item : {} \".format(Item.property.name,myAction.namespace)\n item_range = Item.alexa_range\n item_now = Item()\n myValue = int(what_percentage(item_now, item_range))\n \n elif myAction.namespace == \"Alexa.ThermostatController\" and 
myAction.response_type == 'targetSetpoint':\n msg=\"calculating Value for Controller'{}' - Item : {} \".format(Item.property.name,myAction.namespace)\n item_now = Item()\n myValue = {\n \"value\": item_now,\n \"scale\": \"CELSIUS\"\n }\n elif myAction.namespace == \"Alexa.ThermostatController\" and myAction.response_type == 'thermostatMode':\n msg=\"calculating Value for Controller'{}' - Item : {} \".format(Item.property.name,myAction.namespace)\n item_now = Item()\n myModes = AlexaItem.thermo_config\n myValueList = self.GenerateThermoList(myModes,1)\n myIntMode = int(item_now)\n myMode = self.search(myValueList, str(myIntMode))\n \n myValue = myMode\n \n elif myAction.namespace == 'Alexa.TemperatureSensor':\n msg=\"calculating Value for Controller'{}' - Item : {} \".format(Item.property.name,myAction.namespace)\n item_now = Item()\n myValue = {\n \"value\": item_now,\n \"scale\": \"CELSIUS\"\n }\n \n elif myAction.namespace == 'Alexa.ContactSensor':\n msg=\"calculating Value for Controller'{}' - Item : {} \".format(Item.property.name,myAction.namespace)\n myValue=Item()\n if myValue == 0:\n myValue = 'DETECTED' # means Contact is open\n elif myValue == 1:\n myValue = 'NOT_DETECTED' # means Contact is closed\n \n elif myAction.namespace == 'Alexa.ColorController':\n msg=\"calculating Value for Controller'{}' - Item : {} \".format(Item.property.name,myAction.namespace)\n myValue=Item()\n if len(myValue) == 0:\n myValue.append(0)\n myValue.append(0)\n myValue.append(0)\n try:\n myColorTyp = Item.conf['alexa_color_value_type']\n except Exception as err:\n # default = RGB\n myColorTyp = 'RGB'\n if myColorTyp == 'HSB':\n myHSB = myValue\n else:\n try:\n myHSB = p3tools.rgb_to_hsv(myValue[0], myValue[1], myValue[2])\n except Exception as err:\n print(err)\n \n myValue ={\n \"hue\": myHSB[0],\n \"saturation\": myHSB[1],\n \"brightness\": myHSB[2]\n }\n elif myAction.namespace == 'Alexa.PlaybackController':\n msg=\"calculating Value for Controller'{}' - Item : {} \".format(Item.property.name,myAction.namespace)\n differentNameSpace = 'Alexa.PlaybackStateReporter'\n myValue ={\n \"state\": \"PLAYING\"\n } \n if differentNameSpace == '':\n differentNameSpace = myAction.namespace \n \n #====================================================\n # Add default values if nothing is reported\n #====================================================\n if myAction.namespace not in alreadyReportedControllers:\n if myAction.namespace == 'Alexa.LockController' and myValue == None:\n myValue = 'LOCKED'\n \n MyNewProperty = {\n \"namespace\":differentNameSpace,\n \"name\":Propertyname,\n \"value\":myValue,\n \"timeOfSample\":myTimeStamp,\n \"uncertaintyInMilliseconds\":5000\n }\n \n # Take care for Controllers with instances\n if differentNameSpace == 'Alexa.RangeController':\n MyNewProperty[\"instance\"] = device_id\n \n \n Properties.append(MyNewProperty)\n alreadyReportedControllers.append(myAction.response_type)\n \n #alreadyReportedControllers.append(myAction.namespace)\n except:\n self._proto.addEntry('ERROR ', msg)\n # Add the EndpointHealth Property\n MyNewProperty ={\n \"namespace\": \"Alexa.EndpointHealth\",\n \"name\": \"connectivity\",\n \"value\": {\n \"value\": \"OK\"\n },\n \"timeOfSample\": myTimeStamp,\n \"uncertaintyInMilliseconds\": 5000\n }\n \n \n Properties.append(MyNewProperty)\n \n myEndpoint = self.search(directive,'endpoint')\n myScope = self.search(directive,'scope')\n myEndPointID = self.search(directive,'endpointId')\n myHeader = self.search(directive,'header')\n now = 
datetime.now().isoformat()\n myTimeStamp = now[0:22]+'Z'\n self.replace(myHeader,'messageId',uuid.uuid4().hex)\n self.replace(myHeader,'name','StateReport')\n self.replace(myHeader,'namespace','Alexa')\n \n # Special things for special Controller\n #if myAction.namespace == 'Alexa.PlaybackController':\n # Properties = []\n \n myResponse = {\n \"context\": {\n \"properties\": Properties \n },\n \"event\": {\n \"header\": myHeader\n ,\n \"endpoint\" : {\n \"scope\": myScope,\n \"endpointId\": myEndPointID \n },\n \"payload\": {}\n }\n }\n \n \n self._proto.addEntry('INFO ', \"respondig ReportState for '{}' \".format(myEndPointID))\n return myResponse\n \n def p3_handle_control(self, header, payload,mydirective):\n directive = header['name']\n try:\n device_id = mydirective[\"endpoint\"][\"endpointId\"]\n except :\n device_id = 'unknown'\n self.logger.debug(\"Alexa: control-directive '{}' received\".format(directive))\n \n if header['name'] == 'ReportState':\n directive = header['namespace']+header['name']\n try:\n self.respond( self.p3_ReportState(mydirective))\n return\n except Exception as e:\n self.logger.error(\"Alexa P3: execution of control-directive '{}' failed: {}\".format(directive, e))\n self._proto.addEntry('ERROR ', \"Alexa P3: execution of control-directive '{}' failed: {}\".format(directive, e))\n self.respond({\n 'header': self.header('DriverInternalError', 'Alexa.ConnectedHome.Control'),\n 'payload': {}\n })\n return\n self._proto.addEntry('INFO ', \"received Directive {} for '{}' Payload : {}\".format(directive, device_id, json.dumps(mydirective)))\n action = self.actions.for_directive(directive)\n if action:\n try:\n self._proto.addEntry('INFO ', \"response Payload : {}\".format(json.dumps(mydirective)))\n self.respond( action(mydirective) )\n except Exception as e:\n self.logger.error(\"Alexa P3: execution of control-directive '{}' failed: {}\".format(directive, e))\n self._proto.addEntry('ERROR ', \"Alexa P3: execution of control-directive '{}' failed: {}\".format(directive, e))\n self.respond({\n 'header': self.header('DriverInternalError', 'Alexa.ConnectedHome.Control'),\n 'payload': {}\n })\n else:\n self.logger.error(\"Alexa P3: no action implemented for directive '{}'\".format(directive))\n self._proto.addEntry('ERROR ', \"Alexa P3: no action implemented for directive '{}'\".format(directive))\n self.respond({\n 'header': self.header('UnexpectedInformationReceivedError', 'Alexa.ConnectedHome.Control'),\n 'payload': {}\n })\n \n\n def handle_control(self, header, payload):\n directive = header['name']\n self.logger.debug(\"Alexa: control-directive '{}' received\".format(directive))\n\n action = self.actions.for_directive(directive)\n if action:\n try:\n self.respond( action(payload) )\n except Exception as e:\n self.logger.error(\"Alexa: execution of control-directive '{}' failed: {}\".format(directive, e))\n self.respond({\n 'header': self.header('DriverInternalError', 'Alexa.ConnectedHome.Control'),\n 'payload': {}\n })\n else:\n self.logger.error(\"Alexa: no action implemented for directive '{}'\".format(directive))\n self.respond({\n 'header': self.header('UnexpectedInformationReceivedError', 'Alexa.ConnectedHome.Control'),\n 'payload': {}\n })\n\n def header(self, name, namespace):\n return {\n 'messageId': uuid.uuid4().hex,\n 'name': name,\n 'namespace': namespace,\n 'payloadVersion': '2'\n }\n 
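# A minimal sanity-check sketch for the module-level range helpers above,
# assuming a hypothetical colour-temperature device with a native range of
# 2000-6500 K (values chosen only for illustration):
if __name__ == '__main__':
    _example_range = (2000, 6500)
    print(what_percentage(4000, _example_range))    # 44.44... percent of the range
    print(percent_2_kelvin(44.44, _example_range))  # 4000, rounded to the nearest 10 K
    print(percent_2_kelvin(120, _example_range))    # clamped to the range maximum: 6500
    print(percent_2_kelvin(-5, _example_range))     # clamped to the range minimum: 2000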
\n","repo_name":"smarthomeNG/plugins","sub_path":"alexa4p3/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":36117,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"12"} +{"seq_id":"41558079214","text":"billete_100k = 100000\nbillete_50k = 50000\nbillete_20k = 20000\nbillete_10k = 10000\n\ndinero_a_retirar = int(input(\"Digite el valor a retirar. Recuerde que debe ser multiplo de $10.000: \"))\nbilletes_100k = dinero_a_retirar // billete_100k #5\nresiduo = dinero_a_retirar % billete_100k #60000\n\nbilletes_50k = residuo // billete_50k #1\nresiduo = residuo % billete_50k #10000\n\nbilletes_20k = residuo // billete_20k #0\nresiduo = residuo % billete_20k #10000\n\nbilletes_10k = residuo // billete_10k #1\n\n#print(str(billetes_100k) + \" x $100000\")\n#print(str(billetes_100k), \"x $100000\")\nprint(billetes_100k, \"x $100000\")\nprint(billetes_50k, \"x $50000\")\nprint(billetes_20k, \"x $20000\")\nprint(billetes_10k, \"x $10000\")\n#print('{} x $100000 Billetes de 50=$ {} Billetes de 20=$ {} Billetes de 10=$ {}'.format(billetes_100k,billetes_50k,billetes_20k, billetes_10k))","repo_name":"jdvpl/Python","sub_path":"Universidad Nacional/monitorias/Ejercicios/6-operaciones aritmeticas y logicas/banco_unal.py","file_name":"banco_unal.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"16950704170","text":"import random\r\nimport pygame\r\n \r\n\r\ndef title_icon(caption, image):\r\n pygame.display.set_caption(caption)\r\n icon = pygame.image.load(image)\r\n pygame.display.set_icon(icon)\r\n\r\ndef image(imagename, imageX, imageY):\r\n k = pygame.image.load(imagename)\r\n screen.blit(k, (imageX, imageY))\r\n\r\ndef buttons(picture, position, surface):\r\n image = pygame.image.load(picture)\r\n imagerect = image.get_rect()\r\n imagerect.topright = position\r\n surface.blit(image,imagerect)\r\n return (image,imagerect)\r\n\r\ndef music(sound):\r\n bg_music = pygame.mixer.music.load(sound)\r\n pygame.mixer.music.play(-1)\r\n\r\ndef diamond_deck():\r\n diamond_deck = []\r\n for i in range(len(rankname)):\r\n diamond_deck.append(\"{}_of_diamonds.png\".format(rankname[i]))\r\n random.shuffle(diamond_deck)\r\n return diamond_deck\r\n\r\ndef decks():\r\n deck = []\r\n for i in range(len(rankname)):\r\n for j in range(len(suits)):\r\n deck.append(\"{}_of_{}.png\".format(rankname[i], suits[j]))\r\n random.shuffle(deck)\r\n return deck\r\n\r\ndef card_value(rank):\r\n value = {\"2\" : 2, \"3\" : 3, \"4\" : 4, \"5\" : 5, \"6\" : 6, \"7\" : 7, \"8\" : 8, \"9\" : 9, \"1\" : 10, \"k\" : 10, \"q\" : 10, \"j\" : 10, \"a\" : 11}\r\n return value[rank]\r\n\r\ndef diamond_points(rank):\r\n points = {\"2\" : 3, \"3\" : 3, \"4\" : 3, \"5\" : 3, \"6\" : 3, \"7\" : 3, \"8\" : 3, \"9\" : 3, \"1\" : 3, \"a\" : 20, \"k\" : 10, \"q\" : 10, \"j\" : 10}\r\n return points[rank]\r\n\r\ndef display_names():\r\n diamond = font.render(\"Diamond card\", True, (0,0,0))\r\n player1 = font.render(\"Player 1\", True, (0,0,0))\r\n player2 = font.render(\"Player 2\", True, (0,0,0))\r\n player3 = font.render(\"Player 3\", True, (0,0,0))\r\n screen.blit(diamond, (950,270))\r\n screen.blit(player1, (627, 400))\r\n screen.blit(player2, (122, 270))\r\n screen.blit(player3, (292,270))\r\n \r\ndef score(sco1, sco2, sco3): \r\n s1 = font.render(\"Score1: \"+str(sco1), True, (0,0,0))\r\n s2 = font.render(\"Score2: \"+str(sco2), True, (0,0,0))\r\n s3 = font.render(\"Score3: 
\"+str(sco3), True, (0,0,0))\r\n screen.blit(s1, (1260, 60))\r\n screen.blit(s2, (1260, 110))\r\n screen.blit(s3, (1260, 160))\r\n\r\ndef winner(s1, s2, s3):\r\n if max(s1, s2, s3) == s1 and max(s1, s2, s3) == s2 and max(s1, s2, s3) == s3:\r\n w = font.render(\"Yay! player 1, player 2 and player 3 are the winners\", True, (0, 0, 0))\r\n screen.blit(w, (350, 40))\r\n elif max(s1, s2, s3) == s1 and max(s1, s2, s3) == s2:\r\n w = font.render(\"Yay! player 1 and player 2 are the winners\", True, (0, 0, 0))\r\n screen.blit(w, (410, 40))\r\n \r\n elif max(s1, s2, s3) == s2 and max(s1, s2, s3) == s3:\r\n w = font.render(\"Yay! player 2 and player 3 are the winners\", True, (0, 0, 0))\r\n screen.blit(w, (410, 40))\r\n \r\n elif max(s1, s2, s3) == s1 and max(s1, s2, s3) == s3:\r\n w = font.render(\"Yay! player 1 and player 3 are the winners\", True, (0, 0, 0))\r\n screen.blit(w, (410, 40))\r\n \r\n elif max(s1, s2, s3) == s1:\r\n w = font.render(\"Yay! player 1 is the winner\", True, (0, 0, 0))\r\n screen.blit(w, (500, 40))\r\n \r\n elif max(s1, s2, s3) == s2:\r\n w = font.render(\"Yay! player 2 is the winner\", True, (0, 0, 0))\r\n screen.blit(w, (500, 40))\r\n \r\n elif max(s1, s2, s3) == s3:\r\n w = font.render(\"Yay! player 3 is the winner\", True, (0, 0, 0))\r\n screen.blit(w, (500, 40))\r\n \r\npygame.init()\r\npygame.font.init()\r\n\r\nfont = pygame.font.SysFont(\"ArialCEItalic.ttf\", 45)\r\n \r\nscreen = pygame.display.set_mode((1450,700))\r\n\r\nrankname = [\"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"ace\", \"king\", \"queen\", \"jack\"]\r\nsuits = [\"hearts\", \"spades\", \"clubs\"]\r\n \r\n\r\ndiamondcards = diamond_deck()\r\nd = decks()\r\np1_cards = d[0:39:3]\r\np2_cards = d[1:39:3]\r\np3_cards = d[2:39:3]\r\nscore1, score2, score3 = 0, 0, 0\r\n\r\ndef quitwindow():\r\n screen2 = pygame.display.set_mode((1450,700))\r\n\r\n pygame.display.set_caption(\"Diamonds\")\r\n icon = pygame.image.load('icon1.jpg')\r\n pygame.display.set_icon(icon)\r\n\r\n running = True \r\n while running:\r\n screen.fill((200,220,240))\r\n\r\n playagain = buttons(\"PLAYAGAIN2.PNG\", (600,330), screen)\r\n quit = buttons(\"QUIT2.png\", (1100,330), screen)\r\n \r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n running = False\r\n if pygame.mouse.get_pressed()[0] == 1:\r\n mouse = pygame.mouse.get_pos()\r\n \r\n if quit[1].collidepoint(mouse):\r\n running = False\r\n pygame.quit()\r\n if playagain[1].collidepoint(mouse):\r\n gamewindow()\r\n\r\n pygame.display.update()\r\n\r\n\r\ndef gamewindow():\r\n screen1 = pygame.display.set_mode((1450,700))\r\n title_icon(\"Diamonds\", \"icon1.jpg\")\r\n\r\n music('MUSIC.mp3')\r\n \r\n diamond_pick = random.choice(diamondcards)\r\n print(\"Diamond card picked by banker: \",diamond_pick)\r\n\r\n play1_pick = p1_cards[0]\r\n play2_pick = random.choice(p2_cards)\r\n play3_pick = random.choice(p3_cards)\r\n print(\"card picked by player2:\",play2_pick)\r\n print(\"card picked by player3:\",play3_pick)\r\n\r\n flag, flag1 = 0, 0\r\n\r\n running = True \r\n while running:\r\n \r\n screen1.fill((0,200,0))\r\n bgmusic = buttons('music1.png', (50, 10), screen1)\r\n dcard = buttons('card_back.png', (675,100), screen1)\r\n score(\" \",\" \",\" \")\r\n display_names()\r\n \r\n p1_card1 = buttons(p1_cards[0], (115, 470), screen1)\r\n p1_card2 = buttons(p1_cards[1], (225, 470), screen1)\r\n p1_card3 = buttons(p1_cards[2], (335, 470), screen1)\r\n p1_card4 = buttons(p1_cards[3], (445, 470), screen1)\r\n p1_card5 = buttons(p1_cards[4], (555, 
470), screen1)\r\n p1_card6 = buttons(p1_cards[5], (665, 470), screen1)\r\n p1_card7 = buttons(p1_cards[6], (775, 470), screen1)\r\n p1_card8 = buttons(p1_cards[7], (885, 470), screen1)\r\n p1_card9 = buttons(p1_cards[8], (995, 470), screen1)\r\n p1_card10 = buttons(p1_cards[9], (1105, 470), screen1)\r\n p1_card11 = buttons(p1_cards[10], (1215, 470), screen1)\r\n p1_card12 = buttons(p1_cards[11], (1325, 470), screen1)\r\n p1_card13 = buttons(p1_cards[12], (1435, 470), screen1)\r\n \r\n d_card = image('card_back.png', 1000, 100)\r\n p2card = image('card_back.png', 130, 100)\r\n p3card = image('card_back.png', 300, 100)\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n running = False\r\n quitwindow()\r\n\r\n if pygame.mouse.get_pressed()[0] == 1:\r\n mouse = pygame.mouse.get_pos()\r\n if bgmusic[1].collidepoint(mouse):\r\n pygame.mixer.music.stop()\r\n flag = 1\r\n \r\n \r\n if p1_card1[1].collidepoint(mouse):\r\n play1_pick = p1_cards[0]\r\n flag1 += 1\r\n \r\n if p1_card2[1].collidepoint(mouse):\r\n play1_pick = p1_cards[1]\r\n flag1 += 1\r\n \r\n if p1_card3[1].collidepoint(mouse):\r\n play1_pick = p1_cards[2]\r\n flag1 += 1\r\n \r\n if p1_card4[1].collidepoint(mouse):\r\n play1_pick = p1_cards[3]\r\n flag1 += 1\r\n \r\n if p1_card5[1].collidepoint(mouse):\r\n play1_pick = p1_cards[4]\r\n flag1 += 1\r\n \r\n if p1_card6[1].collidepoint(mouse):\r\n play1_pick = p1_cards[5]\r\n flag1 += 1\r\n \r\n if p1_card7[1].collidepoint(mouse):\r\n play1_pick = p1_cards[6]\r\n flag1 += 1\r\n \r\n if p1_card8[1].collidepoint(mouse):\r\n play1_pick = p1_cards[7]\r\n flag1 += 1\r\n \r\n if p1_card9[1].collidepoint(mouse):\r\n play1_pick = p1_cards[8]\r\n flag1 += 1\r\n \r\n if p1_card10[1].collidepoint(mouse):\r\n play1_pick = p1_cards[9]\r\n flag1 += 1\r\n \r\n if p1_card11[1].collidepoint(mouse):\r\n play1_pick = p1_cards[10]\r\n flag1 += 1\r\n \r\n if p1_card12[1].collidepoint(mouse):\r\n play1_pick = p1_cards[11]\r\n flag1 += 1\r\n \r\n if p1_card13[1].collidepoint(mouse):\r\n play1_pick = p1_cards[12]\r\n flag1 += 1\r\n \r\n if flag == 1:\r\n image('mute1.png', 8, 10)\r\n\r\n d_front = image(diamond_pick, 1025, 96)\r\n \r\n if flag1 > 0:\r\n p1_front = image(play1_pick, 635, 240)\r\n p2_front = image(play2_pick, 155, 96)\r\n p3_front = image(play3_pick, 325, 96)\r\n\r\n p1_value = card_value(play1_pick[0])\r\n p2_value = card_value(play2_pick[0])\r\n p3_value = card_value(play3_pick[0])\r\n p = diamond_points(diamond_pick[0])\r\n\r\n if max(p1_value, p2_value, p3_value) == p1_value and max(p1_value, p2_value, p3_value) == p2_value and max(p1_value, p2_value, p3_value) == p3_value:\r\n score(score1+(p/3), score2+(p/3), score3+(p/3))\r\n winner(score1+(p/3), score2+(p/3), score3+(p/3)) \r\n elif max(p1_value, p2_value, p3_value) == p1_value and max(p1_value, p2_value, p3_value) == p2_value:\r\n score(score1+(p/2), score2+(p/2), score3)\r\n winner(score1+(p/2), score2+(p/2), score3) \r\n elif max(p1_value, p2_value, p3_value) == p2_value and max(p1_value, p2_value, p3_value) == p3_value:\r\n score(score1, score2+(p/2), score3+(p/2))\r\n winner(score1, score2+(p/2), score3+(p/2))\r\n elif max(p1_value, p2_value, p3_value) == p3_value and max(p1_value, p2_value, p3_value) == p1_value: \r\n score(score1+(p/2), score2, score3+(p/2))\r\n winner(score1+(p/2), score2, score3+(p/2)) \r\n elif max(p1_value, p2_value, p3_value) == p1_value:\r\n score(score1+p, score2, score3)\r\n winner(score1+p, score2, score3) \r\n elif max(p1_value, p2_value, p3_value) == p2_value:\r\n 
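# All tie cases were handled by the earlier branches, so player 2 alone holds the highest card here and receives the diamond card's full point value p.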
score(score1, score2+p, score3)\r\n winner(score1, score2+p, score3)\r\n elif max(p1_value, p2_value, p3_value) == p3_value: \r\n score(score1, score2, score3+p)\r\n winner(score1, score2, score3+p)\r\n\r\n pygame.display.update()\r\n print(\"card picked by player1:\",play1_pick)\r\n\r\nrunning = True \r\nwhile running:\r\n # background(R,G,B)\r\n screen.fill((200,220,240))\r\n title_icon(\"Diamonds\", \"icon1.jpg\")\r\n\r\n start = buttons('start1.png',(600,270), screen)\r\n exit = buttons('exit1.png',(1100, 270),screen)\r\n \r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n running = False\r\n if pygame.mouse.get_pressed()[0] == 1:\r\n mouse = pygame.mouse.get_pos()\r\n \r\n if start[1].collidepoint(mouse):\r\n gamewindow()\r\n running = False\r\n \r\n if exit[1].collidepoint(mouse):\r\n running = False\r\n\r\n pygame.display.update()","repo_name":"Saitejaswi-K/Diamonds","sub_path":"game_code/final_game.py","file_name":"final_game.py","file_ext":"py","file_size_in_byte":11382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"41841890179","text":"import datetime\nfrom dateutil.relativedelta import relativedelta\nfrom trytond.model import ModelSQL, ModelView, fields\nfrom trytond.pool import Pool, PoolMeta\nfrom trytond.pyson import Eval, PYSONDecoder, PYSONEncoder\nfrom trytond.wizard import Wizard, StateAction, StateView, Button\nfrom trytond.rpc import RPC\n\n__all__ = ['Work', 'PredecessorSuccessor',\n 'ProjectResourcePlanStart', 'ProjectResourcePlanTasks',\n 'ProjectResourcePlan']\n\n__metaclass__ = PoolMeta\n\n\nclass Work:\n __name__ = 'project.work'\n\n predecessors = fields.Many2Many('project.predecessor_successor',\n 'successor', 'predecessor', 'Predecessors',\n domain=[\n ('id', '!=', Eval('id')),\n ],\n states={\n 'invisible': Eval('type') == 'project',\n }, depends=['type', 'id'])\n successors = fields.Many2Many('project.predecessor_successor',\n 'predecessor', 'successor', 'Successors',\n domain=[\n ('id', '!=', Eval('id')),\n ],\n states={\n 'invisible': Eval('type') == 'project',\n }, depends=['type', 'id'])\n bookings = fields.One2Many('resource.booking', 'document', 'Bookings',\n states={\n 'invisible': Eval('type') != 'task',\n }, depends=['type'])\n expected_end_date = fields.DateTime('Expected End Date',\n states={\n 'invisible': Eval('type') == 'project',\n },\n depends=['type'])\n planned_start_date = fields.DateTime('Planned Start Date',\n states={\n 'invisible': Eval('type') != 'task',\n },\n depends=['type'])\n planned_end_date = fields.DateTime('Planned End Date',\n states={\n 'invisible': Eval('type') != 'task',\n },\n depends=['type'])\n planned_start_date_project = fields.Function(fields.DateTime(\n 'Planned Start Date',\n states={\n 'invisible': Eval('type') == 'task',\n },\n depends=['type']),\n 'get_project_dates')\n planned_end_date_project = fields.Function(fields.DateTime(\n 'Planned End Date',\n states={\n 'invisible': Eval('type') == 'task',\n },\n depends=['type']),\n 'get_project_dates')\n expected_end_date_project = fields.Function(fields.DateTime(\n 'Expected End Date',\n states={\n 'invisible': Eval('type') == 'task',\n },\n depends=['type']),\n 'get_project_dates', setter='set_expected_end_date_project')\n\n planned_employee = fields.Many2One('company.employee', 'Planned')\n assigned_employee = fields.Many2One('company.employee','Assigned')\n\n @classmethod\n def __setup__(cls):\n super(Work, cls).__setup__()\n cls._error_messages.update({\n 'no_resource_found': 
'No resource found for the employee \"%s\"',\n })\n cls._buttons.update({\n 'schedule': {\n 'invisible': (Eval('type') != 'task')\n },\n })\n\n @property\n def scheduled(self):\n return any(b.state == 'confirmed' for b in self.bookings)\n\n @classmethod\n @ModelView.button\n def schedule(cls, works, planning_days=None, done_works=None):\n pool = Pool()\n Resource = pool.get('resource.resource')\n today = datetime.datetime.now()\n Booking = pool.get('resource.booking')\n\n def get_planned_start(predecessors):\n planned_start = None\n for task in predecessors:\n if not planned_start:\n planned_start = task.planned_end_date\n if task.planned_end_date:\n planned_start = max(planned_start, task.planned_end_date)\n if planned_start and planned_start.time() >= task.company.day_ends:\n tomorrow = planned_start + relativedelta(days=1)\n #Skip saturdays and sundays\n while tomorrow.weekday() > 4:\n tomorrow += relativedelta(days=1)\n planned_start = datetime.datetime.combine(tomorrow,\n task.company.day_starts)\n return planned_start\n\n if done_works is None:\n done_works = set()\n to_allocate = []\n for work in works:\n if work.id in done_works or work.scheduled:\n continue\n\n Booking.delete(work.bookings)\n planned_end = None\n predecessors = list(work.predecessors)\n resource = None\n if predecessors:\n cls.schedule(predecessors, planning_days, done_works)\n if not work.assigned_employee:\n start = get_planned_start(list(work.predecessors)) or today\n # Find the employee that can start first the task\n resource = Resource.get_free_resource(start, work.effort,\n domain=work.get_free_resource_domain())\n if not resource:\n continue\n if not work.effort:\n continue\n\n planned_start = get_planned_start(predecessors)\n start = planned_start or today\n\n effort = work.effort\n if not resource:\n resources = Resource.search([\n ('employee', '=', work.assigned_employee.id),\n ], limit=1)\n if not resources:\n cls.raise_user_error('no_resource_found',\n assigned_employee.rec_name)\n resource, = resources\n bookings = resource.book_hours(start, effort, planning_days)\n s, e = resource.book_interval(bookings)\n if not s:\n continue\n planned_start = planned_start and min(planned_start, s) or s\n planned_end = planned_end and max(planned_end, e) or e\n resource.book(bookings, 'project.work,%s' % work.id)\n\n work.planned_end_date = planned_end\n work.planned_start_date = planned_start\n work.planned_employee = resource.employee.id\n work.save()\n done_works.add(work.id)\n\n\n def get_free_resource_domain(self):\n return [('employee', '!=', None)]\n\n @classmethod\n def get_project_dates(cls, works, names):\n result = {}\n for name in names:\n result[name] = {}.fromkeys([w.id for w in works], None)\n\n for work in works:\n for child in cls.search([('parent', 'child_of', [w.id])]):\n for name in names:\n func = min if 'start' in name else max\n fname = name[:-8]\n current = result[name][work.id]\n value = getattr(child, fname)\n if not current:\n current = value\n if value:\n result[name][work.id] = func(current, value)\n return result\n\n @classmethod\n def set_expected_end_date_project(cls, works, name, value):\n childs = cls.search([\n ('parent', 'child_of', [w.id for w in works]),\n ])\n cls.write(childs, {\n 'expected_end_date': value,\n })\n\n def get_assigned_employee(self, name):\n if self.assigned_employee:\n return self.assigned_employee\n\n @classmethod\n def set_assigned_employee(cls, works, name, value):\n Allocation = Pool().get('project.allocation')\n Allocation.delete([allocation for work in 
works\n for allocation in work.allocations])\n if value:\n to_create = []\n for work in works:\n to_create.append({\n 'employee': value,\n 'work': work.id,\n 'percentage': 100.0,\n })\n Allocation.create(to_create)\n\n @classmethod\n def search_assigned_employee(cls, name, clause):\n if clause[2] is None:\n return [('allocations',) + tuple(clause[1:])]\n return [('allocations.employee',) + tuple(clause[1:])]\n\n\nclass PredecessorSuccessor(ModelSQL):\n 'Predecessor - Successor'\n __name__ = 'project.predecessor_successor'\n predecessor = fields.Many2One('project.work', 'Predecessor',\n ondelete='CASCADE', required=True, select=True)\n successor = fields.Many2One('project.work', 'Successor',\n ondelete='CASCADE', required=True, select=True)\n\n @classmethod\n def __setup__(cls):\n super(PredecessorSuccessor, cls).__setup__()\n cls.__rpc__.update({\n 'read': RPC(True),\n 'search': RPC(True),\n 'search_read': RPC(True),\n })\n\nclass ProjectResourcePlanStart(ModelView):\n 'Project Resource Plan Start'\n __name__ = 'project.resource.plan.start'\n\n view_search = fields.Many2One('ir.ui.view_search', 'Search',\n domain=[\n ('model', '=', 'project.work'),\n ])\n domain = fields.Char('Domain')\n order = fields.Char('Order')\n planning_days = fields.Integer('Planning Days')\n delete_drafts = fields.Boolean('Delete drafts', help='If marked all the '\n 'draft bookings will be deleted.')\n confirm_bookings = fields.Boolean('Confirm Bookings', help='If marked the '\n 'generated bookings will be confirmed.')\n\n @staticmethod\n def default_delete_drafts():\n return True\n\n @staticmethod\n def default_planning_days():\n return 90\n\n @fields.depends('view_search')\n def on_change_with_domain(self):\n return self.view_search.domain if self.view_search else None\n\n\nclass ProjectResourcePlanTasks(ModelView):\n 'Project Resource Plan Tasks'\n __name__ = 'project.resource.plan.tasks'\n\n tasks = fields.Many2Many('project.work', None, None, 'Tasks To Schedule',\n domain=[\n ('type', '=', 'task'),\n ])\n\n\nclass ProjectResourcePlan(Wizard):\n 'Project Resource Plan'\n __name__ = 'project.resource.plan'\n\n start = StateView('project.resource.plan.start',\n 'project_resource_plan.resource_plan_start_view_form', [\n Button('Cancel', 'end', 'tryton-cancel'),\n Button('Ok', 'tasks', 'tryton-ok', default=True),\n ])\n tasks = StateView('project.resource.plan.tasks',\n 'project_resource_plan.resource_plan_tasks_view_form', [\n Button('Back', 'start', 'tryton-go-previous'),\n Button('Ok', 'plan', 'tryton-ok', default=True),\n ])\n plan = StateAction('project_resource_plan.act_project_allocation_tree')\n\n def _execute(self, state_name):\n result = super(ProjectResourcePlan, self)._execute(state_name)\n if state_name == 'tasks' and self.start.domain:\n #Ensure that the view domain respects the start domain\n domain = result['view']['fields_view']['fields']['tasks']['domain']\n decoder = PYSONDecoder()\n domain = decoder.decode(domain)\n view_domain = decoder.decode(self.start.domain)\n domain.extend(view_domain)\n domain = PYSONEncoder().encode(domain)\n result['view']['fields_view']['fields']['tasks']['domain'] = domain\n return result\n\n def default_tasks(self, fields):\n pool = Pool()\n Work = pool.get('project.work')\n order = None\n domain = []\n if self.start.domain:\n domain = PYSONDecoder().decode(self.start.domain)\n if self.start.order:\n order = PYSONDecoder().decode(self.start.order)\n domain.append(('type', '=', 'task'))\n tasks = Work.search(domain, order=order)\n return {'tasks': [t.id for t in 
tasks]}\n\n def do_plan(self, action):\n pool = Pool()\n Work = pool.get('project.work')\n Booking = pool.get('resource.booking')\n\n if self.start.delete_drafts:\n to_delete = Booking.search([\n ('state', '=', 'draft'),\n ])\n\n if to_delete:\n Booking.cancel(to_delete)\n Booking.delete(to_delete)\n\n tasks = list(self.tasks.tasks)\n planning_days = relativedelta(days=self.start.planning_days)\n Work.schedule(tasks, planning_days)\n if self.start.confirm_bookings:\n to_confirm = []\n for task in tasks:\n to_confirm.extend(list(task.bookings))\n if to_confirm:\n Booking.confirm(to_confirm)\n return action, {'res_id': [x.id for x in tasks]}\n","repo_name":"NaN-tic/trytond-project_resource_plan","sub_path":"work.py","file_name":"work.py","file_ext":"py","file_size_in_byte":12584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"26065570288","text":"'''\nИндекс массы тела\nНапишите программу для вычисления и оценки индекса массы тела (ИМТ) человека. ИМТ показывает весит человек больше или меньше нормы для своего роста. ИМТ человека рассчитывают по формуле:\nгде масса измеряется в килограммах, а рост — в метрах.\nМасса человека считается оптимальной, если его ИМТ находится между 18.518.5 и 2525. Если ИМТ меньше 18.518.5, то считается, что человек весит ниже нормы. Если значение ИМТ больше 2525, то считается, что человек весит больше нормы.\nПрограмма должна вывести \"Оптимальная масса\", если ИМТ находится между 18.518.5 и 2525 (включительно). \"Недостаточная масса\", если ИМТ меньше 18.518.5 и \"Избыточная масса\", если значение ИМТ больше 2525.\nФормат входных данных\nНа вход программе подается два числа: масса и рост человека, каждое на отдельной строке. Все входные числа являются вещественными, используйте для них тип данных float.\n'''\nweight, height = float(input()), float(input())\nimt = weight / (height ** 2)\nif imt < 18.5:\n print('Недостаточная масса')\nelif 18.5 <= imt <= 25:\n print('Оптимальная масса')\nelse:\n print('Избыточная масса')\n","repo_name":"nordcap/stepik","sub_path":"Stepik-Поколение Python: курс для продвинутых/2 Основные конструкции/Часть 1/task_2.py","file_name":"task_2.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"ru","doc_type":"code","stars":5,"dataset":"github-code","pt":"12"} +{"seq_id":"129297669","text":"\n\n\n\nclass Razon():\n # Devuelve una respuesta por cada tipo de error que puede llevar a un rechazo de la transacción\n def __init__(self):\n self._mensaje=\"\"\n def getMensaje(self):\n print(self._mensaje)\n return self._mensaje\n\n def resolver(self,cliente,evento):\n pass\n \n \n\nclass Alta_chequera(Razon):\n def resolver(self, cliente, evento):\n if evento['totalChequerasActualmente']>=cliente.limite_chequeras:\n self._mensaje =f\"Lo sentimos ud ha superado el limite de chequeras posibles\"\n \n if cliente.get_tier()=='Classic' or cliente.get_tier()=='CLASSIC':\n self._mensaje=f\"Lo sentimos la categoria {cliente.get_tier()}, no permite realizar esta operación\"\n \n\nclass Compra_dolar(Razon):\n def resolver(self, cliente, evento):\n if not cliente.puede_comprar_dolares():\n self._mensaje=\"Lo sentimos pero ud. no se encuentra habilitdao para esta operación\"\n \n elif evento[\"monto\"]>cliente._cuenta_en_dolares:\n self._mensaje=\"Ud. 
ha superado el monto de transferencia a recibir\"\n \n else:\n self._mensaje=\"Lo sentimos no fue posible procesar la transaccion\"\n \nclass Retiro_efectivo(Razon):\n def resolver(self, cliente, evento):\n \n if cliente.get_limite_extraccion_diario('Caja_Ahorro_Pesos')evento[\"monto\"]& evento[\"monto\"]>cliente.get_saldo_descubierto_disponible('Cuenta_Corriente'):\n self._mensaje=\"Lo sentimos su saldo descubierto no cubre el monto solicitado\"\n \n elif evento[\"monto\"]>evento[\"cupoDiarioRestante\"]:\n self._mensaje=\"Lo sentimos pero ha excedido su cupo diario\"\n \nclass Enviar_transferencia(Razon):\n def resolver(self, cliente, evento):\n if evento[\"saldoEnCuenta\"]<(evento[\"monto\"]+(evento[\"monto\"]*0.01)):\n self._mensaje=\"Lo sentimos pero no posee fondos suficientes para realizar esta operacion\"\n \n elif evento[\"saldoEnCuenta\"]>evento[\"monto\"]& evento[\"monto\"]>cliente.get_saldo_descubierto_disponible('Cuenta_Corriente'):\n self._mensaje=\"Lo sentimos su saldo descubierto no cubre el monto solicitado\"\n \n\nclass Alta_Tarjeta_Credito(Razon):\n def resolver(self, cliente, evento):\n if cliente.limite_tarjetasCredito>=evento[\"totalTarjetasDeCreditoActualmente\"]:\n self._mensaje=\"Lo sentimos pero ud ya ha alcanzado el limite de tarjetas posible\"\n \n elif cliente.get_tier()=='Classic'or cliente.get_tier()=='CLASSIC': \n self._mensaje=f\"Lo sentimos la categoria {cliente.get_tier()}, no permite realizar esta operación\"\n \nclass Recibir_transfeerencia(Razon):\n def resolver(self, cliente, evento):\n if cliente.get_transferencia_recibida('Caja_Ahorro_Pesos') 0:\n item['degree_name'] = degree_name[0].strip()\n print(\"item['degree_name']: \", item['degree_name'])\n\n item['programme_en'] = programme_en.replace(item['degree_name'], '').strip()\n print(\"item['programme_en']: \", item['programme_en'])\n\n department = response.xpath(\n \"//div[@id='proxy_rightSummary']//p//span[contains(text(),'College:')]/../text()\").extract()\n clear_space(department)\n item['department'] = ''.join(department).strip()\n # print(\"item['department']: \", item['department'])\n\n ucascode = response.xpath(\n \"//span[contains(text(),'UCAS code:')]/../text()\").extract()\n clear_space(ucascode)\n item['ucascode'] = ''.join(ucascode).strip()\n # print(\"item['ucascode']: \", item['ucascode'])\n\n duration = response.xpath(\n \"//span[contains(text(),'Duration:')]/../text()\").extract()\n clear_space(duration)\n # print(\"duration: \", duration)\n\n duration_list = getIntDuration(''.join(duration))\n if len(duration_list) == 2:\n item['duration'] = duration_list[0]\n item['duration_per'] = duration_list[-1]\n # print(\"item['duration']: \", item['duration'])\n # print(\"item['duration_per']: \", item['duration_per'])\n\n # //div[@class='col-xs-12']//div[@class='row']//div[@class='col-xs-12']//ul[@class='addressList']//li[@class='contactCampus']\n # location = response.xpath(\n # \"//div[@class='col-xs-12']//div[@class='row']//div[@class='col-xs-12']//ul[@class='addressList']//li[@class='contactCampus']/text()\").extract()\n # clear_space(location)\n item['location'] = '33 Buccleuch Place, City, Edinburgh, Post Code. 
EH8 9JS'\n # print(\"item['location']: \", item['location'])\n\n # # //option[@value='0010']\n # start_date = response.xpath(\n # \"//select[@name='code2']//option//text()\").extract()\n # clear_space(start_date)\n # if len(start_date) > 1:\n # item['start_date'] = start_date[0].strip()\n # # print(\"item['start_date']: \", item['start_date'])\n # item['start_date'] = getStartDate(item['start_date'])\n # print(\"item['start_date'] = \", item['start_date'])\n\n overview = response.xpath(\n \"//div[@id='proxy_introduction']\").extract()\n item['overview_en'] = remove_class(clear_lianxu_space(overview))\n # print(\"item['overview_en']: \", item['overview_en'])\n\n\n # //div[@id='proxy_collapseprogramme']\n modules = response.xpath(\n \"//div[@id='proxy_collapseWhatStudy']/..\").extract()\n item['modules_en'] = remove_class(clear_lianxu_space(list(modules)))\n # print(\"item['modules_en']: \", item['modules_en'])\n\n assessment_en = response.xpath(\n \"//div[@id='proxy_collapseLearning']/..\").extract()\n item['assessment_en'] = remove_class(clear_lianxu_space(assessment_en))\n # print(\"item['assessment_en']: \", item['assessment_en'])\n\n career = response.xpath(\n \"//div[@id='proxy_collapseCareers']/..\").extract()\n item['career_en'] = remove_class(clear_lianxu_space(career))\n # print(\"item['career_en']: \", item['career_en'])\n\n # //div[@id='proxy_collapseentry_req']\n # entry_requirements = response.xpath(\n # \"//div[@id='proxy_collapseentry_req']/..//text()\").extract()\n # item['rntry_requirements'] = clear_lianxu_space(entry_requirements)\n # print(\"item['rntry_requirements']: \", item['rntry_requirements'])\n\n alevel = response.xpath(\n \"//li[contains(text(),'A Levels:')]//text()|//p[contains(text(),'A levels:')]//text()\").extract()\n clear_space(alevel)\n if len(alevel) > 0:\n item['alevel'] = ''.join(alevel[-1]).strip()\n print(\"item['alevel'] = \", item['alevel'])\n\n # ib = response.xpath(\n # \"//html//ul[1]/li[3]/abbr[contains(text(),'IB')]/..//text()|//p[contains(text(),'IB:')]//text()\").extract()\n ib = response.xpath(\"//html//ul[3]/li[3]/abbr[contains(text(),'IB')]/..//text()|//p[contains(text(),'IB:')]//text()\").extract()\n clear_space(ib)\n if len(ib) > 0:\n item['ib'] = ''.join(ib).strip()\n print(\"item['ib'] = \", item['ib'])\n\n IELTS = response.xpath(\"//abbr[contains(text(),'IELTS')]/..//text()\").extract()\n item['ielts_desc'] = ''.join(IELTS)\n # print(\"item['ielts_desc']: \", item['ielts_desc'])\n\n ieltsDict = get_ielts(item['ielts_desc'])\n item['ielts'] = ieltsDict.get(\"IELTS\")\n item['ielts_l'] = ieltsDict.get(\"IELTS_L\")\n item['ielts_s'] = ieltsDict.get(\"IELTS_S\")\n item['ielts_r'] = ieltsDict.get(\"IELTS_R\")\n item['ielts_w'] = ieltsDict.get(\"IELTS_W\")\n # print(\"item['ielts'] = %s item['ielts_l'] = %s item['ielts_s'] = %s item['ielts_r'] = %s item['ielts_w'] = %s \" % (\n # item['ielts'], item['ielts_l'], item['ielts_s'], item['ielts_r'], item['ielts_w']))\n\n TOEFL = response.xpath(\"//abbr[contains(text(),'TOEFL')]/..//text()\").extract()\n if len(TOEFL) == 0:\n TOEFL = response.xpath(\"//*[contains(text(),'TOEFL')]//text()\").extract()\n item['toefl_desc'] = ''.join(TOEFL)\n # print(\"item['toefl_desc']: \", item['toefl_desc'])\n\n toeflDict = get_toefl(item['toefl_desc'])\n item['toefl'] = toeflDict.get(\"TOEFL\")\n item['toefl_l'] = toeflDict.get(\"TOEFL_L\")\n item['toefl_s'] = toeflDict.get(\"TOEFL_S\")\n item['toefl_r'] = toeflDict.get(\"TOEFL_R\")\n item['toefl_w'] = toeflDict.get(\"TOEFL_W\")\n # print(\"item['toefl'] = %s 
item['toefl_l'] = %s item['toefl_s'] = %s item['toefl_r'] = %s item['toefl_w'] = %s \" % (\n # item['toefl'], item['toefl_l'], item['toefl_s'], item['toefl_r'], item['toefl_w']))\n\n tuition_feeDict = {}\n tuition_fee_url = response.xpath(\"//html//div[@id='proxy_collapseFees']//p[1]/a/@href\").extract()\n print(\"tuition_fee_url: \", tuition_fee_url)\n if len(tuition_fee_url) > 0:\n tuition_fee_url_str = tuition_fee_url[0]\n fee = self.parse_tuition_fee(tuition_fee_url_str)\n clear_space(fee)\n fee_re = re.findall(r\"£\\d+,\\d+\", ''.join(fee))\n print(\"fee_re: \", fee_re)\n item['tuition_fee'] = getTuition_fee(''.join(fee_re))\n item['tuition_fee_pre'] = \"£\"\n if item['tuition_fee'] == 0:\n item['tuition_fee'] = None\n item['tuition_fee_pre'] = \"\"\n print(\"item['tuition_fee']: \", item['tuition_fee'])\n\n # https://www.ed.ac.uk/studying/international/country/asia/east-asia/china\n item['require_chinese_en'] = remove_class(clear_lianxu_space([\"\"\"

Undergraduate entry requirements for students from China.
Senior High School Certificate
Students who have completed the Chinese Senior High School Certificate are required to undertake further study for entry to most subjects, as this qualification does not normally meet our minimum entry requirements.
We accept the following qualifications for direct entry to our undergraduate degree programmes:
Applicants with qualifications other than those listed above will usually be required to complete a Foundation Year before entering the University.
Foundation year
Science and Engineering
For degree programmes in Science and Engineering, applicants who have completed a year of study at a leading Chinese university may be eligible to apply.
The College of Science & Engineering will also give consideration, on an individual basis, to applicants who have achieved excellent results in the Chinese National University Entrance Examination (Gaokao).
Further guidance on academic entry requirements
Each course may have further specific entry requirements. All applicants must meet these requirements. Staff in the Admissions Offices will be able to provide further guidance.
Undergraduate admissions contacts
English Language requirements
If your first language is not English, you will also have to meet English Language requirements to apply. These requirements are listed by programme.
English Language advice
Specific English language requirement by programme
Contact us
Edinburgh Global's representative for China is Esther Sum.
Esther will help you with admissions advice and support.
Contact us by email - China.Enquiries@ed.ac.uk
Support in your country
View a list of our overseas visits
Our agents in your country
Chat to us
Talk to a member of staff online and view a presentation about study in Edinburgh.
Join our mailing list
We will send you further useful information about the University, admissions and entry.
About Edinburgh
More information about Edinburgh
Do I need a visa?
Student numbers
There are almost 3,000 students from China currently studying at the University of Edinburgh.

\n\"\"\"]))\n # print(\"item['require_chinese_en']: \", item['require_chinese_en'])\n\n item['apply_proces_en'] = \"https://www.ed.ac.uk/studying/undergraduate/applying\"\n\n yield item\n except Exception as e:\n with open(\"scrapySchool_England_Ben/error/\"+item['university']+str(item['degree_type'])+\".txt\", 'a', encoding=\"utf-8\") as f:\n f.write(str(e) + \"\\n\" + response.url + \"\\n========================\")\n print(\"异常:\", str(e))\n print(\"报错url:\", response.url)\n\n def get_modules2(self, modules2url):\n data = requests.get(modules2url, headers=self.headers)\n response = etree.HTML(data.text)\n modules2 = response.xpath(\"/html/body/div[@class='container']\")\n m2 = etree.tostring(modules2[0], encoding='unicode', pretty_print=False, method='html')\n m2 = remove_class(clear_space_str(m2))\n return m2\n\n def parse_tuition_fee(self, tuition_fee_url):\n data = requests.get(tuition_fee_url, headers=self.headers)\n response = etree.HTML(data.text)\n fee = response.xpath(\"//html//table[1]//tr[2]/td//text()\")\n return fee\n","repo_name":"histudent/python_spider","sub_path":"yyx_crawler/scrapySchool_England_Ben/scrapySchool_England_Ben/spiders/TheUniversityOfEdinburgh_U.py","file_name":"TheUniversityOfEdinburgh_U.py","file_ext":"py","file_size_in_byte":14963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"9241336334","text":"from flask import Flask, request, jsonify\nfrom app.send_response import send\nimport sys\nimport json\n\n\napp = Flask(__name__) \n\nreceived_commands=[]\n@app.route(\"/\") \ndef home_view(): \n\treturn \"

Hello world
\"\n\n@app.route('/telegram-commands', methods=['GET','POST'])\ndef receive_commands():\n if request.method=='POST':\n # print(request.json)\n # sys.stdout.flush()\n send(json.dumps(request.json['message']['text']), request.json['message']['from']['id'])\n return 'ok'\n return jsonify(received_commands)\n\n\nif __name__ =='__main__':\n app.secret_key='secret1234'\n app.run()","repo_name":"Freditansari/telegram_bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"74835720019","text":"from rest_framework.serializers import ModelSerializer\nfrom interfaces.models import Interfaces\nfrom .models import Testcases\nfrom rest_framework import serializers\nfrom projects.models import Projects\n\n\nclass InterfacesAnotherSerializer(ModelSerializer):\n project = serializers.StringRelatedField()\n pid = serializers.IntegerField(write_only=True, help_text=\"项目ID\")\n iid = serializers.IntegerField(write_only=True, help_text=\"接口ID\")\n\n class Meta:\n model = Interfaces\n fields = (\"iid\", \"name\", \"pid\", \"project\")\n extra_kwargs = {\n \"name\": {\"read_only\": True}\n }\n\n def validate(self, attrs):\n if not Projects.objects.filter(is_delete=False, id=attrs[\"pid\"]):\n raise serializers.ValidationError(\"项目ID不存在\")\n if not Interfaces.objects.filter(is_delete=False, id=attrs[\"iid\"]):\n raise serializers.ValidationError(\"接口ID不存在\")\n if not Interfaces.objects.filter(is_delete=False, id=attrs[\"iid\"], project=attrs[\"pid\"]):\n raise serializers.ValidationError(\"接口ID和项目ID不对应\")\n return attrs\n\n\nclass TestcasesSerializer(ModelSerializer):\n interfaces = InterfacesAnotherSerializer(help_text=\"所属项目和接口信息\")\n\n class Meta:\n model = Testcases\n fields = (\"id\", \"name\", \"interfaces\", \"include\", \"author\", \"request\")\n extra_kwargs = {\n \"include\": {\n \"write_only\": True\n },\n \"request\": {\"write_only\": True\n }\n\n }\n\n def create(self, validated_data):\n interface_dict = validated_data.pop(\"interfaces\")\n validated_data[\"interfaces_id\"] = interface_dict[\"iid\"]\n return Testcases.objects.create(**validated_data)\n","repo_name":"christ199705/Project","sub_path":"apps/testcases/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"2311874989","text":"\nimport os\nimport sys\nimport pickle\n\ncsv_path = sys.argv[1]\noutput_root = os.path.dirname(csv_path)\nif 'train' in csv_path:\n output_file = os.path.join(output_root, 'train.pkl')\nelif 'val' in csv_path:\n output_file = os.path.join(output_root, 'val.pkl')\nelse:\n raise ValueError(csv_path)\n\nannots = {}\nwith open(csv_path, 'r') as fin:\n for line in fin:\n videoname, fid, x1,y1,x2,y2, label, pid = line.strip().split(',')\n fid = int(fid)\n box = [float(x1), float(y1), float(x2), float(y2)]\n label = int(label)\n pid = int(pid)\n\n if videoname in annots:\n if fid in annots[videoname]:\n if pid in annots[videoname][fid]:\n annots[videoname][fid][pid]['label'].append(label)\n else:\n annots[videoname][fid][pid] = {'label': [label],\n 'box': box}\n else:\n annots[videoname][fid] = {pid: {'label': [label],\n 'box': box}}\n else:\n annots[videoname] = {fid: {pid: {'label': [label],\n 'box': box}}}\n\nwith open(output_file, 'wb') as fout:\n pickle.dump(annots, 
fout)\n","repo_name":"NVlabs/STEP","sub_path":"scripts/generate_label.py","file_name":"generate_label.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","stars":244,"dataset":"github-code","pt":"12"} +{"seq_id":"35219196265","text":"#!/usr/bin/env python3\r\n# -*- coding=utf-8 -*-\r\n\r\nimport cv2 as cv\r\nimport time\r\n\r\n\"\"\"\r\nOpenCV DNN 实现性别与年龄预测\r\n OpenCV DNN中如何调用多个模型,相互配合使用\r\n实现步骤:\r\n 1. 完整的实现步骤需要如下几步:\r\n 2. 预先加载三个网络模型\r\n 3. 打开摄像头视频流/加载图像\r\n 4. 对每一帧进行人脸检测\r\n - 对检测到的人脸进行性别与年龄预测\r\n - 解析预测结果\r\n - 显示结果\r\n\"\"\"\r\n\r\nFACE_PROTO = \"../../../raspberry-auto/models/face_detector/opencv_face_detector.pbtxt\"\r\nFACE_MODEL = \"../../../raspberry-auto/models/face_detector/opencv_face_detector_uint8.pb\"\r\n\r\nAGE_PROTO = \"../../../raspberry-auto/models/cnn_age_gender_models/age_deploy.prototxt\"\r\nAGE_MODEL = \"../../../raspberry-auto/models/cnn_age_gender_models/age_net.caffemodel\"\r\n\r\nGENDER_PROTO = \"../../../raspberry-auto/models/cnn_age_gender_models/gender_deploy.prototxt\"\r\nGENDER_MODEL = \"../../../raspberry-auto/models/cnn_age_gender_models/gender_net.caffemodel\"\r\n\r\nMODEL_MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746)\r\nAGE_LIST = ['(0-2)', '(4-6)', '(8-12)', '(15-20)', '(25-32)', '(38-43)', '(48-53)', '(60-100)']\r\nGENDER_LIST = ['Male', 'Female']\r\nPADDING = 40\r\n\r\n\r\ndef find_face(image, net, conf_threshold=0.7):\r\n h, w = image.shape[:2]\r\n data = cv.dnn.blobFromImage(image, 1.0, (300, 300), (104.0, 177.0, 123.0), False, False)\r\n net.setInput(data)\r\n detections = net.forward()\r\n boxes = []\r\n for i in range(detections.shape[2]):\r\n confidence = detections[0, 0, i, 2]\r\n if conf_threshold < confidence:\r\n left = int(detections[0, 0, i, 3] * w)\r\n top = int(detections[0, 0, i, 4] * h)\r\n right = int(detections[0, 0, i, 5] * w)\r\n bottom = int(detections[0, 0, i, 6] * h)\r\n boxes.append([left, top, right, bottom])\r\n cv.rectangle(image, (left, top), (right, bottom), (255, 0, 0), 2, cv.LINE_8)\r\n return image, boxes\r\n\r\n\r\ndef main():\r\n age_net = cv.dnn.readNet(AGE_MODEL, AGE_PROTO)\r\n gender_net = cv.dnn.readNet(GENDER_MODEL, GENDER_PROTO)\r\n face_net = cv.dnn.readNet(FACE_MODEL, FACE_PROTO)\r\n t = time.time()\r\n image = cv.imread(\"../../../raspberry-auto/pic/70eb501cjw1dwp7pecgewj.jpg\")\r\n image, boxes = find_face(image, face_net)\r\n if not boxes:\r\n print(\"can't find any face here!\")\r\n return\r\n h, w = image.shape[:2]\r\n for box in boxes:\r\n roi = image[\r\n max(0, box[1] - PADDING): min(box[3] + PADDING, h - 1),\r\n max(0, box[0] - PADDING): min(box[2] + PADDING, w - 1)]\r\n data = cv.dnn.blobFromImage(roi, 1.0, (227, 227), MODEL_MEAN_VALUES, False, False)\r\n gender_net.setInput(data)\r\n age_net.setInput(data)\r\n gender_out = gender_net.forward()\r\n gender_info = GENDER_LIST[gender_out[0].argmax()]\r\n cv.putText(image, \"%s, %.3f\" % (gender_info, gender_out[0].max()), (box[0] - PADDING, box[1] - PADDING),\r\n cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0))\r\n age_out = age_net.forward()\r\n age_info = AGE_LIST[age_out[0].argmax()]\r\n cv.putText(image, \"{}, {:.3f}\".format(age_info, age_out[0].max()), (box[0], box[3] + PADDING),\r\n cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0))\r\n print(\"use time %.3f ms\" % (time.time() - t))\r\n # cv.namedWindow(\"dst\", cv.WINDOW_FREERATIO)\r\n cv.imshow(\"dst\", image)\r\n cv.waitKey(0)\r\n\r\n\r\nif \"__main__\" == __name__:\r\n main()\r\n 
cv.destroyAllWindows()\r\n","repo_name":"afterloe/opencv-practice","sub_path":"cv_workshops/13-section/5-clazz.py","file_name":"5-clazz.py","file_ext":"py","file_size_in_byte":3556,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"12"} +{"seq_id":"42040487499","text":"import os,time, datetime\nfrom datetime import timedelta\nfrom sense_hat import SenseHat\n# enter target time and date here\n\nday=30\nmonth=6\nyear=2017\nhour=0\nminutes=0\nsec=0\ntargetTime = datetime.datetime(year, month, day, hour, minutes) # sets up target time\ntimeNow =datetime.datetime.now()\n\n\nremainingTime=(targetTime-timeNow)\ndays = remainingTime.days\nsecs = remainingTime.seconds\nhrs, secs = divmod(secs, 3600)\nmins, secs = divmod(secs, 60)\n\n#############################\n\nOFFSET_LEFT = 1\nOFFSET_TOP = 1\n\nNUMS =[1,1,1,1,0,1,1,0,1,1,0,1,1,1,1, # 0\n 0,1,0,0,1,0,0,1,0,0,1,0,0,1,0, # 1\n 1,1,1,0,0,1,0,1,0,1,0,0,1,1,1, # 2\n 1,1,1,0,0,1,1,1,1,0,0,1,1,1,1, # 3\n 1,0,0,1,0,1,1,1,1,0,0,1,0,0,1, # 4\n 1,1,1,1,0,0,1,1,1,0,0,1,1,1,1, # 5\n 1,1,1,1,0,0,1,1,1,1,0,1,1,1,1, # 6\n 1,1,1,0,0,1,0,1,0,1,0,0,1,0,0, # 7\n 1,1,1,1,0,1,1,1,1,1,0,1,1,1,1, # 8\n 1,1,1,1,0,1,1,1,1,0,0,1,0,0,1] # 9\n\n# Displays a single digit (0-9)\ndef show_digit(val, xd, yd, r, g, b):\n offset = val * 15\n for p in range(offset, offset + 15):\n xt = p % 3\n yt = (p-offset) // 3\n sense.set_pixel(xt+xd, yt+yd, r*NUMS[p], g*NUMS[p], b*NUMS[p])\n\n# Displays a two-digits positive number (0-99)\ndef show_number(val, r, g, b):\n abs_val = abs(val)\n tens = abs_val // 10\n units = abs_val % 10\n if (abs_val > 9):\n show_digit(tens, OFFSET_LEFT, OFFSET_TOP, r, g, b)\n show_digit(units, OFFSET_LEFT+4, OFFSET_TOP, r, g, b)\n\n\n################################################################################\n# MAIN\n\nsense = SenseHat()\nsense.clear()\n\nif days <= 9:\n sense.show_letter(str(days),[255,0,0])\nelse:\n show_number(days, 0, 255, 0)\n \n \n\n","repo_name":"gp86041/raspberry_pi_codes","sub_path":"display_countdown_on_Sense_HAT.py","file_name":"display_countdown_on_Sense_HAT.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"4764781079","text":"import webapp2\nimport rot13\nimport signup\nimport blog\n\nclass MainPage(webapp2.RequestHandler):\n def get(self):\n self.response.headers[\"Content-Type\"] = \"text/plain\"\n self.response.write(\"Hello, Udacity!\")\n\napp = webapp2.WSGIApplication([(\"/?\", MainPage),\n (\"/rot13/?\", rot13.Rot13),\n (\"/blog/signup/?\", signup.Signup),\n (\"/blog/login/?\", signup.Login),\n (\"/blog/logout/?\", signup.Logout),\n (\"/blog/welcome/?\", signup.Welcome),\n (\"/blog/?\", blog.Blog),\n (\"/blog/([0-9]+)/?\", blog.Post),\n (\"/blog/newpost/?\", blog.NewPost),\n #json\n ((\"/blog/.json\"), blog.BlogJSON),\n ((\"/blog/([0-9]+).json\"), blog.PostJSON),\n (\"/blog/flush/?\", blog.FlushCache)],\n debug=True)\n\n","repo_name":"sefakilic/udacity-cs253","sub_path":"helloworld.py","file_name":"helloworld.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"15853867434","text":"\"\"\"empty message\n\nRevision ID: 6dfb112feb4a\nRevises: dc588aa20092\nCreate Date: 2019-03-26 10:49:05.747258\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '6dfb112feb4a'\ndown_revision = 'dc588aa20092'\nbranch_labels = 
None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('user', sa.Column('score', sa.Integer(), nullable=True))\n op.create_unique_constraint(None, 'user', ['name'])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, 'user', type_='unique')\n op.drop_column('user', 'score')\n # ### end Alembic commands ###\n","repo_name":"zaoyuaner/Learning-materials","sub_path":"python1812/python_3/day22_模型关系_分页_钩子函数/FlaskDemo4/migrations/versions/6dfb112feb4a_.py","file_name":"6dfb112feb4a_.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"35350237097","text":"import numpy as np\nimport copy\nimport time\nfrom Instance import Instance\nfrom Solution import Solution\nfrom TSPLog import TSPLog\n\nclass SimulatedAnnealing:\n\n def __init__(self, instance, log, inittmp=0, ctype=\"\", cts=None, ntype=\"\", miter=0):\n self.instance = instance\n self.log = log\n self.initial_tmp = inittmp\n self.cooling_type = ctype\n self.constants = cts\n self.neighbourhood_type = ntype\n self.max_iterations = miter\n\n def gen_random_solution(self):\n # Creating a solution with a random permutation\n return Solution(self.instance, perm_random=True)\n\n def gen_neighbourhood(self, solution, type):\n if type == 'anychange': return solution.any_change_neighbourhood()\n elif type == 'adjacent': return solution.adjacent_neighbourhood()\n\n def alpha(self, t_k):\n if self.cooling_type == \"linear\": return self.initial_tmp - self.constants[\"eta\"] * t_k\n elif self.cooling_type == \"log\": return self.constants[\"c\"] / (1 + np.log(self.constants[\"k\"]))\n elif self.cooling_type == \"slow\": return t_k / (1 + self.constants[\"beta\"] * t_k)\n elif self.cooling_type == \"geo\": return self.constants[\"alpha\"] * t_k\n\n def search(self):\n t1 = time.time()\n\n iterations = 0\n # Get a random solution for the instance\n current_solution = self.gen_random_solution()\n current_evaluation = current_solution.get_eval()\n initial_eval = current_evaluation\n\n # Set the best evaluation to infinite and solution as the actual one\n best_evaluation = float(\"inf\")\n best_solution = current_solution\n\n tmp = self.initial_tmp\n self.log.set_initial_temp(tmp).set_max_iters(self.max_iterations)\n\n while iterations < self.max_iterations:\n current_evaluation = current_solution.get_eval()\n # We obtain the current solution neighbourhood\n neighbourhood = self.gen_neighbourhood(current_solution, self.neighbourhood_type)\n\n # Select one random swap\n random_swap_index = np.random.choice(len(neighbourhood))\n random_swap = neighbourhood[random_swap_index]\n\n # Create the random neighbour applying the swap\n random_neighbour = copy.deepcopy(current_solution)\n random_neighbour.apply_swap(random_swap)\n\n # We evaluate the neighbour\n random_neighbour_eval = random_neighbour.get_eval()\n\n # If we find a better solution, take it\n if random_neighbour_eval < current_evaluation:\n current_solution = random_neighbour\n\n # If it is the best so far, save it\n if random_neighbour_eval < best_evaluation:\n best_evaluation = random_neighbour_eval\n best_solution = random_neighbour\n self.log.add_data(\"best-vs-iter\", (iterations, random_neighbour_eval))\n else:\n # Accept solution with this probability\n acceptance_probability = np.exp(-(random_neighbour_eval - current_evaluation) / 
tmp)\n probability = np.random.uniform()\n\n # Accepting\n if probability < acceptance_probability:\n current_solution = random_neighbour\n\n tmp = self.alpha(tmp)\n iterations += 1\n self.constants[\"k\"] = iterations\n self.log.add_data(\"eval-vs-iter\", (iterations, random_neighbour_eval))\n self.log.add_data(\"act-vs-ini\", (iterations, initial_eval - random_neighbour_eval))\n self.log.add_data(\"best-vs-act\", (iterations, current_evaluation - best_evaluation))\n print(\"Percentage {}%\".format(iterations * 100 // self.max_iterations), end=\"\\r\")\n\n t2 = time.time()\n self.log.set_final_temp(tmp).set_best_eval(best_evaluation).set_time(t2 - t1)\n self.log.add_data(\"best-vs-iter\", (self.max_iterations, best_evaluation))\n return (best_solution, best_evaluation)\n","repo_name":"adriangrpz/evolutivo","sub_path":"t3/src/SimulatedAnnealing.py","file_name":"SimulatedAnnealing.py","file_ext":"py","file_size_in_byte":4036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"30475639672","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 10 11:15:33 2021\n\n@author: blink\n\"\"\"\n\nl=[0,2,1,4,1,0]\nl0=[0,2,1,4,5,6]\nl1=[2,3,1,0,5]\nl2=[0,1,2,3,4,5]\nl3=[5,4,3,2,1,0]\n\n\ndef flowers(l):\n flower=[1,1,1,1,1,1]\n for i in range(1,len(l)-1):\n if (l[i]>l[i-1]) & (l[i]>l[i+1]):\n flower[i]+=1\n elif (l[i]>l[i-1]) & (l[i]flower[i]:\n flower[i]=flower[i-1]\n flower[i]+=1\n elif (l[i]l[i+1]):\n if flower[i-1]l[i+1]):\n pass\n print(flower,i,l[i],'\\n')\n\n return flower\n\ndef biggest(a,b):\n if a>b:\n return a\n else:\n return b\n \ndef process(l):\n flower=[]\n for i in range(len(l)):\n flower.append(0)\n midPoint=int(len(l)/2 if len(l)%2==0 else (len(l)+1)/2)-1\n \n for i in range(2) :\n for i in range(1,len(l)-1):\n if (l[i]>=l[i-1]) & (l[i]>=l[i+1]):\n # print('One')\n temp=biggest(flower[i-1],flower[i+1])+1\n flower[i]=temp\n elif (l[i]<=l[i-1]) & (l[i]<=l[i+1]):\n # print('Two')\n pass\n elif (l[i]<=l[i-1]) & (l[i]>=l[i+1]):\n # print('Three')\n if flower[i]<=flower[i+1]:\n flower[i]=flower[i+1]+1\n if flower[i-1]<=flower[i]:\n flower[i-1]=flower[i-1]+1\n \n elif (l[i]>=l[i-1]) & (l[i]<=l[i+1]):\n # print('Four')\n if flower[i]<=flower[i-1]:\n flower[i]=flower[i-1]+1\n if flower[i]>=flower[i+1]:\n flower[i+1]=flower[i]+1\n # print(flower,l[i-1],l[i],l[i+1]) \n if l[len(l)-1]>l[len(l)-2]:\n flower[len(l)-1]=flower[len(l)-2]+1\n else:\n pass\n \n \n if l[len(l)-1]>l[len(l)-2]:\n flower[len(l)-1]=flower[len(l)-2]+1\n if l[0]>l[1]:\n flower[0]=flower[1]+1\n \n\n for i in range(len(l)):\n flower[i]=flower[i]+1\n return(flower)\n\n\ndef splitBySpace(string):\n numberList=[]\n previousPointer=0\n for i,n in zip(string,range(len(string))):\n if i==' ':\n number=int(string[previousPointer:n+1])\n previousPointer=n\n numberList.append(number)\n numberList.append(int(string[previousPointer:n+1]))\n return numberList\n\n\n\nstringArray=input('Enter the ratings of bridsmaid (Seperated by spaces')\narr=splitBySpace(stringArray)\nflowers=process(arr)\nminimumFlowers=0\nfor i in flowers:\n minimumFlowers+=i\nprint('Flowers would be distributed as ',flowers)\nprint('Minimum number of flowers needed : ',minimumFlowers)\n ","repo_name":"sandheepgopinath/Code-Repository","sub_path":"Development/Basics/Python/bridesmaid.py","file_name":"bridesmaid.py","file_ext":"py","file_size_in_byte":2853,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} 
+{"seq_id":"29696212001","text":"ds = ds\nmodel_search = model_search\nds.dtypes()\nds.search_grid\n\n#%% Build feature and model variables\nvset_feature = gamete.design_space.VariableList(\"feature set\", list())\nvset_model = gamete.design_space.VariableList(\"model set\", list())\n\n# Ensure no overlap in columns!\nfeature_names = [vdef['name'] for vdef in ds.search_grid]\nhyper_param_names = [vdef['name'] for vdef in model_search.search_grid]\nassert not list(set(feature_names) & set(hyper_param_names))\n\n# Build the variable sets\nfor vdef in ds.search_grid:\n vset_feature.append(gamete.design_space.Variable(**vdef))\n\nfor vdef in model_search.search_grid:\n vset_model.append(gamete.design_space.Variable(**vdef))\n\n#%% Create the DesignSpace\nthis_ds = gamete.design_space.DesignSpace([vset_feature, vset_model])\nprint(this_ds)\n\nthis_ds.print_design_space()\nprint(this_ds)\n#%% Create a single Genome\n\nfor i in range(20):\n genes = this_ds.gen_genes()\n\n this_genome = gamete.evolution_space.Genome(genes)\n # this_genome.print_genes()\n logging.info(\"Genome: {}\".format(this_genome))\n\n run_definition = dict()\n run_definition['generation'] = 0\n run_definition['population_number'] = i\n run_definition['id'] = this_genome.hash\n run_definition['path_data_root'] = PATH_DATA_ROOT\n run_definition['genome'] = this_genome.export_genome()\n run_definition['sample fraction'] = 0.5\n run_definition['random seed'] = 42\n run_definition['cv folds'] = 5\n\n type(run_definition['genome'])\n\n out_file_name = \"control {}.json\".format(run_definition['id'])\n out_folder_name =\"{:03d}_{}\".format(run_definition['population_number'],run_definition['id'])\n out_path = Path(PATH_EXPERIMENT_ROOT).expanduser() / str(run_definition['generation']) / out_folder_name / out_file_name\n\n out_path.parent.mkdir(parents=True, exist_ok=True)\n with open(out_path, 'w') as f:\n yaml.dump(run_definition, f, default_flow_style=False)\n logging.info(\"Wrote run definition to {}\".format(out_file_name))\n\n","repo_name":"bmj-hackathon/kaggle_petfinder_adoption","sub_path":"src_dawkins/6_mapping/mapping_00_build.py","file_name":"mapping_00_build.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"32803350389","text":"from PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_grid_size(QtCore.QObject):\n width = QtCore.pyqtSignal(int)\n height = QtCore.pyqtSignal(int)\n\n def setupUi(self, grid_size):\n grid_size.setObjectName(\"grid_size\")\n grid_size.resize(400, 300)\n grid_size.setMinimumSize(QtCore.QSize(400, 300))\n grid_size.setMaximumSize(QtCore.QSize(400, 300))\n self.buttonBox = QtWidgets.QDialogButtonBox(grid_size)\n self.buttonBox.setGeometry(QtCore.QRect(30, 240, 341, 32))\n self.buttonBox.setOrientation(QtCore.Qt.Horizontal)\n self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Ok)\n self.buttonBox.setCenterButtons(True)\n self.buttonBox.setObjectName(\"buttonBox\")\n self.title = QtWidgets.QLabel(grid_size)\n self.title.setGeometry(QtCore.QRect(67, 10, 265, 49))\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(26)\n self.title.setFont(font)\n self.title.setObjectName(\"title\")\n self.width_x_height = QtWidgets.QLabel(grid_size)\n self.width_x_height.setGeometry(QtCore.QRect(100, 70, 200, 36))\n font = QtGui.QFont()\n font.setPointSize(18)\n self.width_x_height.setFont(font)\n self.width_x_height.setObjectName(\"width_x_height\")\n self.width_value = 
QtWidgets.QSpinBox(grid_size)\n self.width_value.setGeometry(QtCore.QRect(115, 140, 60, 40))\n font = QtGui.QFont()\n font.setPointSize(16)\n self.width_value.setFont(font)\n self.width_value.setMaximum(80)\n self.width_value.setObjectName(\"width_value\")\n self.height_value = QtWidgets.QSpinBox(grid_size)\n self.height_value.setGeometry(QtCore.QRect(225, 140, 60, 40))\n font = QtGui.QFont()\n font.setPointSize(16)\n self.height_value.setFont(font)\n self.height_value.setMaximum(80)\n self.height_value.setObjectName(\"height_value\")\n self.X_label = QtWidgets.QLabel(grid_size)\n self.X_label.setGeometry(QtCore.QRect(190, 140, 16, 33))\n font = QtGui.QFont()\n font.setPointSize(16)\n self.X_label.setFont(font)\n self.X_label.setObjectName(\"X_label\")\n\n self.retranslateUi(grid_size)\n self.buttonBox.accepted.connect(grid_size.accept) # type: ignore\n QtCore.QMetaObject.connectSlotsByName(grid_size)\n\n def retranslateUi(self, grid_size):\n _translate = QtCore.QCoreApplication.translate\n grid_size.setWindowTitle(_translate(\"grid_size\", \"Grid Size\"))\n self.title.setText(_translate(\"grid_size\", \"set the grid size\"))\n self.width_x_height.setText(_translate(\"grid_size\", \"Width X Height\"))\n self.X_label.setText(_translate(\"grid_size\", \"X\"))\n\nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n grid_size = QtWidgets.QDialog()\n ui = Ui_grid_size()\n ui.setupUi(grid_size)\n grid_size.show()\n sys.exit(app.exec_())\n","repo_name":"Ahmad-Hamdy/Shortest-path-finder","sub_path":"grid_size.py","file_name":"grid_size.py","file_ext":"py","file_size_in_byte":2982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"5788783378","text":"moneyForToys = 0\nkidsCount = 0\nmoneyForSweaters = 0\nadultsCount = 0\ncommand = input()\n\nwhile not command == \"Christmas\":\n years = int(command)\n if years <= 16:\n moneyForToys += 5\n kidsCount += 1\n else:\n moneyForSweaters += 15\n adultsCount += 1\n\n command = input()\n\nprint(f\"Number of adults: {adultsCount}\")\nprint(f\"Number of kids: {kidsCount}\")\nprint(f\"Money for toys: {moneyForToys}\")\nprint(f\"Money for sweaters: {moneyForSweaters}\")","repo_name":"petiatodorova/PythonFundamentals","sub_path":"while_exercises/christmass_debug.py","file_name":"christmass_debug.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"38029823933","text":"from random import seed\n\nfrom numpy.random import randint\nfrom percentile import find_percentile, find_percentile_initial_approach\nimport unittest\n\n\nclass UnitTests(unittest.TestCase):\n\n def test_b_is_empty_should_return(self):\n answer = find_percentile([1, 2, 4], [], 50)\n expected_answer = 2\n self.assertEqual(expected_answer, answer)\n\n def test_a_is_empty_should_return(self):\n answer = find_percentile([], [1, 2, 4], 50)\n expected_answer = 2\n self.assertEqual(expected_answer, answer)\n\n def test_median(self):\n test_a, test_b, test_p = [1, 2, 7, 8, 10], [6, 12], 50\n self.assertEqual(7, find_percentile(test_a, test_b, test_p),\n \"should_be_7\")\n\n def test_percentile(self):\n test_a, test_b, test_p = [15, 20], [25, 40, 50], 40\n self.assertEqual(20, find_percentile(test_a, test_b, test_p),\n \"should_be_20\")\n\n\ndef create_random_array(max_length=100, max_number=1000):\n seed(1)\n return sorted(randint(0, randint(1, max_number), randint(1, max_length)))\n\n\ndef 
create_array(max_length=100, max_number=1000):\n seed(1)\n return sorted(randint(0, max_number, max_length))\n\n\nclass StressTests(unittest.TestCase):\n\n def test_with_naive(self):\n for i in range(100):\n a, b = create_random_array(), create_random_array()\n p = int(randint(1, 100)) % 100\n expected_answer = find_percentile_initial_approach(a, b, p)\n answer = find_percentile(a, b, p)\n self.assertEqual(expected_answer, answer, f\"a={a}, \\n b {b} \\n p {p}\")\n\n\nclass MaxTests(unittest.TestCase):\n\n def test_with_naive(self):\n a, b = create_array(1000000, 10000000), create_array(1000000, 10000000)\n p = int(randint(1, 100)) % 100\n expected_answer = find_percentile_initial_approach(a, b, p)\n answer = find_percentile(a, b, p)\n self.assertEqual(expected_answer, answer, f\"a={a}, \\n b {b} \\n p {p}\")\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"Ryumkin/algorithms_part1","sub_path":"week6/percentile_unittests.py","file_name":"percentile_unittests.py","file_ext":"py","file_size_in_byte":2044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"1748736845","text":"import socket # Import socket module\nimport os\nfrom threading import Thread\nclass ClientThread(Thread):\n def __init__(self, ip, port, sock):\n Thread.__init__(self)\n self.ip = ip\n self.port = port\n self.sock = sock\n print(\" New thread started for \"+ip+\":\"+str(port))\n def run(self):\n while True: # Receive the data in small chunks and retransmit it\n req = self.sock.recv(64).decode()\n if(req):\n method, name = req.split(\" \")\n # get\n if(method == \"GET\"):\n try:\n fi = open(name,'rb')\n if(fi):\n self.sock.send(\"OK\\n\".encode())\n c2, addr2 = tcpsockSendData.accept()\n while True:\n data = fi.read(buffer_size)\n while (data):\n c2.send(data)\n #print('Sent ',repr(l))\n data = fi.read(buffer_size)\n if not data:\n fi.close()\n c2.close()\n break;\n finally: \n self.sock.send(\"ERROR\\n\".encode())\n # delete\n elif(method == \"DELETE\"):\n file_path = './' + name\n try:\n os.remove(file_path)\n self.sock.send(\"OK\\n\".encode())\n print(\"da xoa file \", name, \" thanh cong!!\")\n except OSError as e:\n self.sock.send(\"ERROR\\n\".encode())\n print(\"Error: %s : %s\" % (file_path, e.strerror))\n # list\n elif(method == \"LIST\"):\n file_path = './' + name\n try:\n list = os.listdir(file_path)\n self.sock.send(\"OK\\n\".encode())\n c2, addr2 = tcpsockSendData.accept()\n myString = \"-\".join(list)\n c2.send(myString.encode())\n print(\"hien ds trong thu muc \", name, \" thanh cong!!\")\n c2.close();\n finally: \n self.sock.send(\"ERROR\\n\".encode())\n\nhost = \"localhost\"\nport = 8000\nport2 = 8001\ntcpsockSendData = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ntcpsockSendData.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\ntcpsockSendData.bind((host, port2))\n\ntcpsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ntcpsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\ntcpsock.bind((host, port))\n\nthreads = []\nbuffer_size = 1024\n\nwhile True:\n tcpsock.listen(5)\n tcpsockSendData.listen(5)\n print(\"Waiting for incoming connections...\")\n (conn, (ip, port)) = tcpsock.accept()\n print('Got connection from ', (ip, port))\n newthread = ClientThread(ip, port, conn)\n newthread.start()\n 
threads.append(newthread)","repo_name":"atjpta/ct293","sub_path":"lab6/Cau4_lab5_server.py","file_name":"Cau4_lab5_server.py","file_ext":"py","file_size_in_byte":3002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"32999634221","text":"import requests\nfrom bs4 import BeautifulSoup\n\nurl=\"http://www.nytimes.com\"\nr=requests.get(url)\nr_html=r.text\nsoup=BeautifulSoup(r_html,'lxml')\ni=1\nfor link in soup.find_all('h3') :\n if link.get_text() != \"Advertisement\": \n print(link.get_text())\n i=i+1 \n","repo_name":"Omar-DAOUDI/python","sub_path":"python/practise_python/DecodeAWebPage.py","file_name":"DecodeAWebPage.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"22189315082","text":"import json\nimport os\nfrom multiprocessing import cpu_count\nfrom unittest import mock\n\nimport pytest\nfrom pydantic import ValidationError\n\nfrom framework.config import DatabaseSettings\nfrom framework.config import Settings\n\n\n@pytest.mark.unit\n@mock.patch.dict(os.environ, {}, clear=True)\n@mock.patch(\"framework.config.Settings.Config.secrets_dir\", None)\ndef test_default_settings():\n settings = Settings()\n\n assert settings.DATABASE_URL is None\n assert settings.DB_DRIVER is None\n assert settings.DB_HOST is None\n assert settings.DB_NAME is None\n assert settings.DB_PASSWORD is None\n assert settings.DB_PORT is None\n assert settings.DB_USER is None\n assert settings.HOST == \"localhost\"\n assert settings.MODE_DEBUG is False\n assert settings.PORT == 8000\n assert settings.SENTRY_DSN is None\n\n nr_cpus = 2 * cpu_count() + 1\n assert settings.WEB_CONCURRENCY == nr_cpus\n\n with pytest.raises(ValidationError):\n settings.database_url_from_db_components()\n assert settings.db_components_from_database_url() == DatabaseSettings()\n\n\n@pytest.mark.unit\ndef test_database_url_from_db_components():\n with pytest.raises(ValidationError) as exc_info:\n Settings().database_url_from_db_components()\n err = json.loads(exc_info.value.json())\n assert isinstance(err, list)\n assert err == [\n {\n \"loc\": [\"schema\"],\n \"msg\": \"db driver MUST be set\",\n \"type\": \"value_error\",\n }\n ]\n\n assert (\n Settings(\n DB_DRIVER=\"postgresql+asyncpg\",\n ).database_url_from_db_components()\n == \"postgresql+asyncpg://\"\n )\n\n assert (\n Settings(\n DB_DRIVER=\"sqlite\",\n DB_HOST=\"localhost\",\n ).database_url_from_db_components()\n == \"sqlite://localhost\"\n )\n\n assert (\n Settings(\n DB_DRIVER=\"sqlite\",\n DB_NAME=\":memory:\",\n ).database_url_from_db_components()\n == \"sqlite:///:memory:\"\n )\n\n with pytest.raises(ValidationError) as exc_info:\n Settings(\n DB_DRIVER=\"sqlite\",\n DB_USER=\"user\",\n ).database_url_from_db_components()\n err = json.loads(exc_info.value.json())\n assert isinstance(err, list)\n assert err == [\n {\n \"loc\": [\"schema\"],\n \"msg\": \"netloc MUST be set when userinfo is set\",\n \"type\": \"value_error\",\n }\n ]\n","repo_name":"tgrx/galera","sub_path":"src/framework/tests/test_settings.py","file_name":"test_settings.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"3320939540","text":"import sys\nimport numpy as np\nimport yt\nfrom scipy.optimize import curve_fit\nfrom yt.fields.api import ValidateParameter\nfrom yt.units import kpc,pc,km,second,yr,Myr,Msun,kilometer,G\nfrom astropy.table 
import Table , Column ,vstack,hstack\n\ndef _Disk_Radius(field, data):\n center = data.get_field_parameter('center')\n x = data[\"x\"] - center[0]\n y = data[\"y\"] - center[1]\n r = np.sqrt(x*x+y*y)\n return r\ndef _Disk_H(field, data):\n center = data.get_field_parameter('center')\n z = data[\"z\"] - center[2]\n return np.abs(z)\ndef _vc(field,data):\n if data.has_field_parameter(\"bulk_velocity\"):\n bv = data.get_field_parameter(\"bulk_velocity\").in_units(\"cm/s\")\n else:\n bv = data.ds.arr(np.zeros(3), \"cm/s\")\n xv = data[\"gas\",\"velocity_x\"] - bv[0]\n yv = data[\"gas\",\"velocity_y\"] - bv[1]\n center = data.get_field_parameter('center')\n x_hat = data[\"x\"] - center[0]\n y_hat = data[\"y\"] - center[1]\n r = np.sqrt(x_hat*x_hat+y_hat*y_hat)\n x_hat /= r\n y_hat /= r\n\n return (yv*x_hat-xv*y_hat)\ndef _Disk_Angle(field, data):\n center = data.get_field_parameter('center')\n x = data[\"x\"] - center[0]\n y = data[\"y\"] - center[1]\n r = np.arctan2(y,x)\n return r\ndef _vertical_velocity(field,data):\n if data.has_field_parameter(\"bulk_velocity\"):\n bv = data.get_field_parameter(\"bulk_velocity\").in_units(\"cm/s\")\n else:\n bv = data.ds.arr(np.zeros(3), \"cm/s\")\n v = data[\"gas\",\"velocity_z\"] - bv[2]\n return v\nyt.add_field(\"Disk_Radius\",\n function=_Disk_Radius,\n units=\"cm\",\n take_log=False,\n validators=[ValidateParameter('center')])\nyt.add_field(\"Disk_H\",\n function=_Disk_H,\n units=\"pc\",\n take_log=False,\n validators=[ValidateParameter('center')])\nyt.add_field(\"vc\", function=_vc,\n take_log=False, units=r\"km/s\",validators=[ValidateParameter('bulk_velocity')])\nyt.add_field(\"Disk_Angle\",\n function=_Disk_Angle,\n units=\"dimensionless\",\n take_log=False,\n validators=[ValidateParameter('center')])\nyt.add_field(\"vertical_velocity\",\n function=_vertical_velocity,take_log=False, units=r\"km/s\",validators=[ValidateParameter('bulk_velocity')])\n\ndef Smooth(x,y,n=1,b=1):\n NN=len(y[:])\n z=np.zeros(NN)\n d=np.zeros(NN)\n X=np.zeros(NN+2*n)\n Y=np.zeros(NN+2*n)\n\n\n for i in range(n):\n X[n-i-1]=x[0]-(i+1)*(x[1]-x[0])\n Y[n-i-1]=y[0]\n for i in np.arange(NN,NN+2*n,1):\n X[i]=x[NN-1]+(i+1-NN)*(x[1]-x[0])\n Y[i]=y[NN-1]\n count = n\n for xa,xb in zip(x,y):\n X[count]=xa\n Y[count]=xb\n count+=1\n for i in range(len(x)):\n for j in range(2*n+1):\n z[i]=z[i]+np.exp( -(X[i+n]-X[i+j])**2 /(2*b**2) )*Y[i+j]\n d[i]=d[i]+np.exp( -(X[i+n]-X[i+j])**2 /(2*b**2) )\n\n return z/d\ndef Velocity_curve(R,Vo,R1,R2):\n return Vo*np.arctan(R/R1)*np.exp(-R/R2)\ndef add_extremes(X):\n X=np.insert(X,0,0)\n X=np.insert(X,len(X),2*X[-1]-X[-2])\n return X\ndef analytic_model(radius,vel_cir,rbin):\n Redges=np.arange(0*pc,35000*pc,rbin*pc)\n Rcen=0.5*(Redges[1:]+Redges[0:-1])\n N=len(Rcen)\n V_16 = np.zeros(N)\n V_50 = np.zeros(N)\n V_84 = np.zeros(N)\n\n for k in range(N):\n ring=(Redges[k]radius)\n if len(ring[ring])<4:\n V_16[k] = np.nan\n V_50[k] = np.nan\n V_84[k] = np.nan\n else:\n vc_ring=vel_cir[ring]\n V_16[k], V_50[k], V_84[k] = np.percentile(vc_ring,[16,50,84])\n\n Rcen=add_extremes(Rcen)\n V_16=add_extremes(V_16)\n V_50=add_extremes(V_50)\n V_84=add_extremes(V_84)\n\n radius = Rcen[Rcen<20000]\n V_16 = V_16[Rcen<20000]\n V_50 = V_50[Rcen<20000]\n V_84 = V_84[Rcen<20000]\n\n V_err=0.5*(V_84-V_16)\n V_err[0]=V_err[1:].min()\n\n p_0, cov_0 = curve_fit(V_curve,radius,V_50,sigma=V_err,p0=[V_50.max(),np.mean(radius)])\n p_0, cov_0 = curve_fit(Velocity_curve,radius,V_50,sigma=V_err,p0=[p_0[0],p_0[1],p_0[1]])\n return p_0\ndef 
parameters_rotation_curve(directory,sim):\n ds = yt.load(directory+'/'+sim+'/G-'+sim[-4:])\n Disk = ds.disk('c', [0., 0., 1.],(40, 'kpc'), (1, 'kpc'))\n\n VC = Disk['vc'].in_units(\"pc/Myr\")\n R = Disk['Disk_Radius'].in_units(\"pc\")\n par = analytic_model(R,VC,300)\n np.save('Tables/'+sim+'_rotation',par)\ndef V_curve(R,Vo,R1):\n return Vo*np.arctan(R/R1)\ndef Vorticity_curve(R,Vo,R1,R2):\n result = np.piecewise(R,[R<1e-3,R>=1e-3],[lambda R:2*Vo/R1,\n lambda R:Vo*np.exp(-R/R2)*(R1/(R1**2+R**2) +\n np.arctan(R/R1)*(1.0/R-1.0/R2))])\n\n return result\ndef Kappa_curve(R,Vo,R1,R2):\n result = 2*Omega_curve(R,Vo,R1,R2)*Vorticity_curve(R,Vo,R1,R2)\n return np.sqrt(result)\ndef Omega_curve(R,Vo,R1,R2):\n result = np.piecewise(R,[R<1e-3,R>=1e-3],[lambda R:Vo/R1,\n lambda R:Vo*np.arctan(R/R1)*np.exp(-R/R2)/R])\n return result\ndef sigma_kappa(directory,sim):\n ds = yt.load(directory+'/'+sim+'/G-'+sim[-4:])\n Disk = ds.disk('c', [0., 0., 1.],(25, 'kpc'), (1, 'kpc'))\n L=30*kpc\n grids=ds.refine_by**ds.index.max_level*ds.domain_dimensions[0]\n DX=ds.arr(1, 'code_length')\n DX.convert_to_units('pc')\n RM=DX\n DR=10*DX/grids\n dr=DX/grids\n\n radius=Disk['Disk_Radius'].in_units(\"pc\")\n mass=Disk['cell_mass'].in_units(\"Msun\")\n rho=Disk['density'].in_units(\"Msun/pc**3\")\n angle=Disk['Disk_Angle']\n\n Redges=np.arange(0*pc,20000*pc,DR)\n Rcen=0.5*(Redges[1:]+Redges[0:-1])\n N=len(Rcen)\n\n mass_bin = np.zeros(N)\n sigma_50 = np.zeros(N)\n sigma_16 = np.zeros(N)\n sigma_84 = np.zeros(N)\n\n for k in range(N):\n ring=(Redges[k]radius)\n\n if len(ring[ring])<4:\n mass_bin[k] = np.nan\n sigma_50[k] = np.nan\n sigma_16[k] = np.nan\n sigma_84[k] = np.nan\n\n else:\n weights = rho[ring] # density weights\n mass_bin[k] = mass[ring].sum() # mass in radial bin\n mring = mass[ring]\n\n Nring=len(weights)\n Nang=int(Nring/200)\n\n Aedges = np.linspace(-np.pi,np.pi,Nang)\n Acen = 0.5*(Aedges[1:]+Aedges[0:-1])\n\n #######################\n\n Aedges = np.linspace(-np.pi,np.pi,33)\n Acen = 0.5*(Aedges[1:]+Aedges[0:-1])\n\n m_aux=[]\n aring=angle[ring]\n for jk in range(32):\n section = (Aedges[jk]aring)\n if len(section[section])>1:\n mangle = mring[section].sum()\n mangle /= np.pi*(Redges[k+1]**2-Redges[k]**2)/32\n m_aux.append(mangle)\n m_aux = np.array(m_aux)\n sigma_50[k] = np.median(m_aux)\n sigma_16[k],sigma_84[k] = np.percentile(m_aux,[16,84])\n\n sigma_gas_bin=mass_bin/(2*np.pi*Rcen*DR)\n incorrect=np.isnan(sigma_gas_bin)\n\n sigma_gas_bin[incorrect] = 0.0\n sigma_50[incorrect] = 0.0\n sigma_16[incorrect] = 0.0\n sigma_84[incorrect] = 0.0\n\n ### Smoothing fields ###\n\n sigma_gas_bin = Smooth(Rcen,sigma_gas_bin ,N,DR)\n sigma_50 = Smooth(Rcen,sigma_50 ,N,DR)\n sigma_16 = Smooth(Rcen,sigma_16 ,N,DR)\n sigma_84 = Smooth(Rcen,sigma_84 ,N,DR)\n\n par = np.load('Tables/'+sim+'_rotation.npy')\n\n kappa_bin = Kappa_curve(Rcen,*par)*km/second/pc\n kappa_bin.convert_to_units('1/Myr')\n sigma_gas_bin = sigma_gas_bin*Msun/pc**2\n sigma_50 = sigma_50*Msun/pc**2\n sigma_16 = sigma_16*Msun/pc**2\n sigma_84 = sigma_84*Msun/pc**2\n\n\n tabla=Table()\n tabla['radius']\t = Column(np.array(Rcen))\n tabla['kappa'] = Column(np.array(kappa_bin))\n tabla['sigma_gas'] = Column(np.array(sigma_gas_bin))\n tabla['sigma_16'] = Column(np.array(sigma_16))\n tabla['sigma_50'] = Column(np.array(sigma_50))\n tabla['sigma_84'] = Column(np.array(sigma_84))\n\n\n tabla.write('Tables/'+sim+'_sigma_kappa',path='data',format='hdf5',overwrite=True)\n return 
0\n\ncmdarg=sys.argv\n\nsim=cmdarg[-1]\ndirectory=cmdarg[-2]\n\nparameters_rotation_curve(directory,sim)\nsigma_kappa(directory,sim)\n","repo_name":"Jose-Utreras/Mass_transport","sub_path":"Sigma_Kappa.py","file_name":"Sigma_Kappa.py","file_ext":"py","file_size_in_byte":8560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"8602907870","text":"# Uses python3\nimport sys\n\ndef get_majority_element(a, left, right):\n if left == right:\n return -1\n if left + 1 == right:\n return a[left]\n #write your code here\n\n max_ele = a[left]\n max_cnt = 1\n ele = a[0]\n cur_cnt = 1\n for i in range(left+1,right):\n if a[i] == ele:\n cur_cnt += 1\n else:\n if cur_cnt > max_cnt:\n max_ele = ele\n max_cnt = cur_cnt\n ele = a[i]\n cur_cnt = 1\n\n if cur_cnt > max_cnt:\n max_ele = ele\n max_cnt = cur_cnt\n \n if max_cnt > (left-right):\n return max_ele\n\n return -1 \n\ndef get_majority_element_divandconq(a, left, right):\n # last tree level\n if (right - left) == 1:\n return a[left]\n else:\n a.sort()\n # split point\n mid = (left + right) // 2\n\n left_maj_elem = get_majority_element(a, left, mid)\n right_maj_elem = get_majority_element(a, mid+1, right)\n\n # define whether there is a majority element for the part of the array\n # majority elements, exclude -1\n maj_elems = (a for a in (left_maj_elem, right_maj_elem) if a != -1)\n for maj_elem in maj_elems:\n cnt = 0\n for i in range(left, right):\n if a[i] == maj_elem:\n cnt += 1\n if cnt > (right - left) / 2:\n return maj_elem\n return -1\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n n, *a = list(map(int, input.split()))\n if get_majority_element_divandconq(a, 0, n) != -1:\n print(1)\n else:\n print(0)\n","repo_name":"juju0111/Data-Structures-and-Algorithms-Specialization","sub_path":"Algorithmic Toolbox/week4/majority_element.py","file_name":"majority_element.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"189776852","text":"#!/usr/bin/env python\nu\"\"\"\ntest_gia.py (12/2022)\nTests the that GIA model readers are equivalent\n\"\"\"\nimport gzip\nimport time\nimport pytest\nimport shutil\nimport pathlib\nimport numpy as np\nimport gravity_toolkit as gravtk\n\n# PURPOSE: Download ICE-6G GIA model\n@pytest.fixture(scope=\"module\", autouse=True)\ndef download_GIA_model():\n # output GIA file\n GIA_FILE = pathlib.Path('Stokes_trend_High_Res.txt')\n # download GIA model\n HOST = ['https://www.atmosp.physics.utoronto.ca','~peltier','datasets',\n 'Ice6G_C_VM5a','ICE-6G_High_Res_Stokes_trend.txt.gz']\n fid = gravtk.utilities.from_http(HOST, verbose=True)\n # decompress GIA model from virtual BytesIO object\n with gzip.open(fid, 'rb') as f_in, open(GIA_FILE, 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)\n # run tests\n yield\n # clean up\n GIA_FILE.unlink()\n\n# PURPOSE: read ICE-6G GIA test outputs\ndef test_GIA_model_read():\n # output GIA file and type\n GIA_FILE = 'Stokes_trend_High_Res.txt'\n GIA = 'ICE6G-D'\n # read GIA model\n Ylms = gravtk.read_GIA_model(GIA_FILE, GIA=GIA)\n # assert input GIA values\n assert Ylms['clm'][2,0] == 1.43961238E-11\n assert Ylms['clm'][3,0] == 1.52009079E-12\n assert Ylms['slm'][3,1] == -8.05198489E-12\n # assert parameters\n assert Ylms['title'] == 'ICE6G-D_High_Res'\n\n# PURPOSE: read ICE-6G GIA model and test harmonic outputs\ndef test_GIA_model_harmonics():\n # output GIA file and type\n GIA_FILE = 'Stokes_trend_High_Res.txt'\n GIA = 
'ICE6G-D'\n # degree of truncation\n LMAX,MMAX = (60, 30)\n # read GIA model\n Ylms = gravtk.gia(lmax=LMAX).from_GIA(GIA_FILE, GIA=GIA, mmax=MMAX)\n # assert input GIA values\n assert Ylms.clm[2,0] == 1.43961238E-11\n assert Ylms.clm[3,0] == 1.52009079E-12\n assert Ylms.slm[3,1] == -8.05198489E-12\n # assert parameters\n assert Ylms.title == 'ICE6G-D_High_Res'\n # assert truncation\n assert Ylms.lmax == LMAX\n assert Ylms.l[-1] == LMAX\n assert Ylms.mmax == MMAX\n assert Ylms.m[-1] == MMAX\n\n# PURPOSE: read ICE-6G GIA model and compare drift estimates\ndef test_GIA_model_drift_estimate():\n # output GIA file and type\n GIA_FILE = 'Stokes_trend_High_Res.txt'\n GIA = 'ICE6G-D'\n # degree and order of truncation\n LMAX,MMAX = (60, 30)\n # synthetic time estimate\n now = time.gmtime()\n tdec = np.arange(2002, now.tm_year+1, 1.0/12.0)\n epoch = 2003.3\n # read GIA model\n GIA_Ylms_rate = gravtk.read_GIA_model(GIA_FILE, GIA=GIA, LMAX=LMAX, MMAX=MMAX)\n # calculate the monthly mass change from GIA\n GIA_Ylms = gravtk.harmonics(lmax=LMAX, mmax=MMAX)\n GIA_Ylms.time = np.copy(tdec)\n GIA_Ylms.month = gravtk.time.calendar_to_grace(tdec)\n GIA_Ylms.month = gravtk.time.adjust_months(GIA_Ylms.month)\n # allocate for output harmonics\n GIA_Ylms.clm = np.zeros((GIA_Ylms.lmax+1, GIA_Ylms.mmax+1, len(tdec)))\n GIA_Ylms.slm = np.zeros((GIA_Ylms.lmax+1, GIA_Ylms.mmax+1, len(tdec)))\n # assert input GIA values\n # monthly GIA calculated by gia_rate*time elapsed\n # finding change in GIA each month\n for i,t in enumerate(tdec):\n GIA_Ylms.clm[:,:,i] = GIA_Ylms_rate['clm']*(t - epoch)\n GIA_Ylms.slm[:,:,i] = GIA_Ylms_rate['slm']*(t - epoch)\n # read GIA model and calculate drift from harmonics class\n Ylms = gravtk.gia(lmax=LMAX).from_GIA(\n GIA_FILE, GIA=GIA, mmax=MMAX).drift(tdec, epoch=epoch)\n # assert that spherical harmonics are equal\n assert np.all(GIA_Ylms.clm == Ylms.clm)\n assert np.all(GIA_Ylms.slm == Ylms.slm)\n # assert time variables are equal\n assert np.all(GIA_Ylms.time == Ylms.time)\n assert np.all(GIA_Ylms.month == Ylms.month)\n","repo_name":"tsutterley/gravity-toolkit","sub_path":"test/test_gia.py","file_name":"test_gia.py","file_ext":"py","file_size_in_byte":3677,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"12"} +{"seq_id":"22681031028","text":"import argbind\nfrom pathlib import Path\nimport yaml\nfrom typing import List\n\n\n\n\n\"\"\"example output: (yaml)\n\n\"\"\"\n\n@argbind.bind(without_prefix=True, positional=True)\ndef fine_tune(audio_files_or_folders: List[str], name: str):\n\n conf_dir = Path(\"conf\")\n assert conf_dir.exists(), \"conf directory not found. 
are you in the vampnet directory?\"\n\n conf_dir = conf_dir / \"generated\"\n conf_dir.mkdir(exist_ok=True)\n\n finetune_dir = conf_dir / name\n finetune_dir.mkdir(exist_ok=True)\n\n finetune_c2f_conf = {\n \"$include\": [\"conf/lora/lora.yml\"],\n \"fine_tune\": True,\n \"train/AudioLoader.sources\": audio_files_or_folders,\n \"val/AudioLoader.sources\": audio_files_or_folders,\n \"VampNet.n_codebooks\": 14,\n \"VampNet.n_conditioning_codebooks\": 4,\n \"VampNet.embedding_dim\": 1280,\n \"VampNet.n_layers\": 16,\n \"VampNet.n_heads\": 20,\n \"AudioDataset.duration\": 3.0,\n \"AudioDataset.loudness_cutoff\": -40.0,\n \"save_path\": f\"./runs/{name}/c2f\",\n \"fine_tune_checkpoint\": \"./models/vampnet/c2f.pth\"\n }\n\n finetune_coarse_conf = {\n \"$include\": [\"conf/lora/lora.yml\"],\n \"fine_tune\": True,\n \"train/AudioLoader.sources\": audio_files_or_folders,\n \"val/AudioLoader.sources\": audio_files_or_folders,\n \"save_path\": f\"./runs/{name}/coarse\",\n \"fine_tune_checkpoint\": \"./models/vampnet/coarse.pth\"\n }\n\n interface_conf = {\n \"Interface.coarse_ckpt\": f\"./runs/{name}/coarse/latest/vampnet/weights.pth\",\n\n \"Interface.coarse2fine_ckpt\": f\"./runs/{name}/c2f/latest/vampnet/weights.pth\",\n \"Interface.wavebeat_ckpt\": \"./models/wavebeat.pth\",\n\n \"Interface.codec_ckpt\": \"./models/vampnet/codec.pth\",\n \"AudioLoader.sources\": [audio_files_or_folders],\n }\n\n # save the confs\n with open(finetune_dir / \"c2f.yml\", \"w\") as f:\n yaml.dump(finetune_c2f_conf, f)\n\n with open(finetune_dir / \"coarse.yml\", \"w\") as f:\n yaml.dump(finetune_coarse_conf, f)\n \n with open(finetune_dir / \"interface.yml\", \"w\") as f: \n yaml.dump(interface_conf, f)\n\n\n print(f\"generated confs in {finetune_dir}. run training jobs with `python scripts/exp/train.py --args.load {finetune_dir}/.yml` \")\n\nif __name__ == \"__main__\":\n args = argbind.parse_args()\n\n with argbind.scope(args):\n fine_tune()\n\n\n\n ","repo_name":"hugofloresgarcia/vampnet","sub_path":"scripts/exp/fine_tune.py","file_name":"fine_tune.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","stars":216,"dataset":"github-code","pt":"12"}